[
  {
    "path": ".gitignore",
    "content": "\n*.ncb\r\n*.suo\n\r\n*.user\r\n*.sdf\r\n\n*.ipch\r\n\n*.opensdf\r\n\n*.o\n*.a\n\nobj\r\n\n*.iobj\r\n*.ipdb\r\n*.exe"
  },
  {
    "path": "build/cleanup.bat",
    "content": "echo Let's clean them up.\r\n\r\necho Cleaning vs2005\\...\r\ndel vs2005\\*.ncb\r\ndel vs2005\\*.user\r\ndel vs2005\\*.suo /a:h\r\nrmdir vs2005\\obj /s /q\r\n\r\necho Cleaning vs2008\\...\r\ndel vs2008\\*.ncb\r\ndel vs2008\\*.user\r\ndel vs2008\\*.suo /a:h\r\nrmdir vs2008\\obj /s /q\r\n\r\necho Cleaning vs2010\\...\r\ndel vs2010\\*.sdf\r\ndel vs2010\\*.user\r\ndel vs2010\\*.suo /a:h\r\nrmdir vs2010\\ipch /s /q\r\nrmdir vs2010\\obj /s /q\r\n\r\necho Cleaning vs2012\\...\r\ndel vs2012\\*.sdf\r\ndel vs2012\\*.user\r\ndel vs2012\\*.suo /a:h\r\nrmdir vs2012\\ipch /s /q\r\nrmdir vs2012\\obj /s /q\r\n\r\necho Cleaning vs2013\\...\r\ndel vs2013\\*.sdf\r\ndel vs2013\\*.user\r\ndel vs2013\\*.suo /a:h\r\nrmdir vs2013\\ipch /s /q\r\nrmdir vs2013\\obj /s /q\r\n\r\necho Cleaning vs2015\\...\r\ndel vs2015\\*.sdf\r\ndel vs2015\\*.user\r\ndel vs2015\\*.suo /a:h\r\nrmdir vs2015\\.vs /s /q\r\nrmdir vs2015\\obj /s /q\r\n\r\necho Finished. Yeah !\r\n"
  },
  {
    "path": "build/unix/makefile",
    "content": "#sudo apt-get install build-essential\n#sudo apt-get install ffmpeg\n#sudo apt-get install libav-tools\n#sudo apt-get install libopencv-dev\n\nCXX           = g++\nCXXFLAGS      = -O2 -Wall -D__STDC_CONSTANT_MACROS `pkg-config --libs --cflags opencv`\nLIBS          = -lm                     \\\n                -lpthread               \\\n                -lavutil                \\\n                -lavformat              \\\n                -lavcodec               \\\n                -lswscale\nOBJS          = ../../src/ardrone/ardrone.o \\\n                ../../src/ardrone/command.o \\\n                ../../src/ardrone/config.o  \\\n                ../../src/ardrone/udp.o     \\\n                ../../src/ardrone/tcp.o     \\\n                ../../src/ardrone/navdata.o \\\n                ../../src/ardrone/version.o \\\n                ../../src/ardrone/video.o   \\\n                ../../src/main.o\nPROGRAM       = test.a\n\n$(PROGRAM):     $(OBJS)\n\t\t$(CXX) $(OBJS) -o $(PROGRAM) $(CXXFLAGS) $(LDFLAGS) $(LIBS) \n\nclean:;         rm -f *.o *~ $(PROGRAM) $(OBJS)\n\ninstall:        $(PROGRAM)\n\t\tinstall -s $(PROGRAM) $(DEST)\n"
  },
  {
    "path": "build/vs2010/test.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 11.00\r\n# Visual Studio 2010\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcxproj\", \"{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "build/vs2010/test.vcxproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup Label=\"ProjectConfigurations\">\n    <ProjectConfiguration Include=\"Release|Win32\">\n      <Configuration>Release</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n    <ProjectConfiguration Include=\"Debug|Win32\">\n      <Configuration>Debug</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n  </ItemGroup>\n  <PropertyGroup Label=\"Globals\">\n    <ProjectGuid>{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}</ProjectGuid>\n    <RootNamespace>player</RootNamespace>\n    <Keyword>Win32Proj</Keyword>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.Default.props\" />\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n    <WholeProgramOptimization>true</WholeProgramOptimization>\n  </PropertyGroup>\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.props\" />\n  <ImportGroup Label=\"ExtensionSettings\">\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <PropertyGroup Label=\"UserMacros\" />\n  <PropertyGroup>\n    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">..\\..\\bin\\vs2010\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">.\\obj\\debug\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">true</LinkIncremental>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">..\\..\\bin\\vs2010\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">.\\obj\\release\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">false</LinkIncremental>\n  </PropertyGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">\n    <ClCompile>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      <PreprocessorDefinitions>WIN32;NOMINMAX;NDEBUG;_CONSOLE;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      
<WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\n      <DisableSpecificWarnings>4244;4819;4996;%(DisableSpecificWarnings)</DisableSpecificWarnings>\n    </ClCompile>\n    <Link>\n      <AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2010;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\release\\release.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <StackReserveSize>0</StackReserveSize>\n      <StackCommitSize>0</StackCommitSize>\n      <OptimizeReferences>true</OptimizeReferences>\n      <EnableCOMDATFolding>true</EnableCOMDATFolding>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">\n    <ClCompile>\n      <Optimization>Disabled</Optimization>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      <PreprocessorDefinitions>WIN32;NOMINMAX;_DEBUG;_CONSOLE;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <MinimalRebuild>true</MinimalRebuild>\n      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\n      <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      <WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>EditAndContinue</DebugInformationFormat>\n    </ClCompile>\n    <Link>\n      
<AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2010;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\debug\\debug.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\main.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\" />\n  </ItemGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.targets\" />\n  <ImportGroup Label=\"ExtensionTargets\">\n  </ImportGroup>\n</Project>"
  },
  {
    "path": "build/vs2010/test.vcxproj.filters",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup>\n    <Filter Include=\"Source Files\">\n      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\n      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\n    </Filter>\n    <Filter Include=\"Source Files\\ardrone\">\n      <UniqueIdentifier>{550b0fc4-8368-48d8-b626-03df6c7d9121}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\">\n      <UniqueIdentifier>{06e2d9e1-74fe-4d29-9e94-f3ea80429740}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\\ardrone\">\n      <UniqueIdentifier>{19ba6cc5-e937-4c72-8c33-8d3581893111}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Resource Files\">\n      <UniqueIdentifier>{4cceaaea-23b5-4465-aeec-3b00075be422}</UniqueIdentifier>\n    </Filter>\n  </ItemGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\main.cpp\">\n      <Filter>Source Files</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\">\n      <Filter>Header Files\\ardrone</Filter>\n    </ClInclude>\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\">\n      <Filter>Resource Files</Filter>\n    </ResourceCompile>\n  </ItemGroup>\n</Project>"
  },
  {
    "path": "build/vs2012/test.sln",
    "content": "﻿\r\nMicrosoft Visual Studio Solution File, Format Version 12.00\r\n# Visual Studio Express 2012 for Windows Desktop\r\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcxproj\", \"{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}\"\r\nEndProject\r\nGlobal\r\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n\t\tRelease|Win32 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.ActiveCfg = Release|Win32\r\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.Build.0 = Release|Win32\r\n\tEndGlobalSection\r\n\tGlobalSection(SolutionProperties) = preSolution\r\n\t\tHideSolutionNode = FALSE\r\n\tEndGlobalSection\r\nEndGlobal\r\n"
  },
  {
    "path": "build/vs2012/test.vcxproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup Label=\"ProjectConfigurations\">\n    <ProjectConfiguration Include=\"Debug|Win32\">\n      <Configuration>Debug</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n    <ProjectConfiguration Include=\"Release|Win32\">\n      <Configuration>Release</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n  </ItemGroup>\n  <PropertyGroup Label=\"Globals\">\n    <ProjectGuid>{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}</ProjectGuid>\n    <RootNamespace>\n    </RootNamespace>\n    <Keyword>Win32Proj</Keyword>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.Default.props\" />\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n    <WholeProgramOptimization>true</WholeProgramOptimization>\n    <PlatformToolset>v110</PlatformToolset>\n  </PropertyGroup>\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n    <PlatformToolset>v110</PlatformToolset>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.props\" />\n  <ImportGroup Label=\"ExtensionSettings\">\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <PropertyGroup Label=\"UserMacros\" />\n  <PropertyGroup>\n    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">..\\..\\bin\\vs2012\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">.\\obj\\debug\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">true</LinkIncremental>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">..\\..\\bin\\vs2012\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">.\\obj\\release\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">false</LinkIncremental>\n  </PropertyGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">\n    <ClCompile>\n      <Optimization>Disabled</Optimization>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      
<PreprocessorDefinitions>WIN32;NOMINMAX;_DEBUG;_CONSOLE;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <MinimalRebuild>true</MinimalRebuild>\n      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\n      <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      <WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>EditAndContinue</DebugInformationFormat>\n    </ClCompile>\n    <Link>\n      <AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2012;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\debug\\debug.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">\n    <ClCompile>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      <PreprocessorDefinitions>WIN32;NOMINMAX;NDEBUG;_CONSOLE;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      <WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\n      <DisableSpecificWarnings>4244;4819;4996;%(DisableSpecificWarnings)</DisableSpecificWarnings>\n    </ClCompile>\n    <Link>\n      
<AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2012;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\release\\release.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <StackReserveSize>0</StackReserveSize>\n      <StackCommitSize>0</StackCommitSize>\n      <OptimizeReferences>true</OptimizeReferences>\n      <EnableCOMDATFolding>true</EnableCOMDATFolding>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\main.cpp\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\" />\n  </ItemGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.targets\" />\n  <ImportGroup Label=\"ExtensionTargets\">\n  </ImportGroup>\n</Project>"
  },
  {
    "path": "build/vs2012/test.vcxproj.filters",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup>\n    <Filter Include=\"Source Files\">\n      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\n      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\n    </Filter>\n    <Filter Include=\"Source Files\\ardrone\">\n      <UniqueIdentifier>{550b0fc4-8368-48d8-b626-03df6c7d9121}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\">\n      <UniqueIdentifier>{06e2d9e1-74fe-4d29-9e94-f3ea80429740}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\\ardrone\">\n      <UniqueIdentifier>{19ba6cc5-e937-4c72-8c33-8d3581893111}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Resource Files\">\n      <UniqueIdentifier>{4cceaaea-23b5-4465-aeec-3b00075be422}</UniqueIdentifier>\n    </Filter>\n  </ItemGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\main.cpp\">\n      <Filter>Source Files</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\">\n      <Filter>Header Files\\ardrone</Filter>\n    </ClInclude>\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\">\n      <Filter>Resource Files</Filter>\n    </ResourceCompile>\n  </ItemGroup>\n</Project>"
  },
  {
    "path": "build/vs2013/test.sln",
    "content": "﻿\nMicrosoft Visual Studio Solution File, Format Version 12.00\n# Visual Studio Express 2013 for Windows Desktop\nVisualStudioVersion = 12.0.30110.0\nMinimumVisualStudioVersion = 10.0.40219.1\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcxproj\", \"{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}\"\nEndProject\nGlobal\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\n\t\tRelease|Win32 = Release|Win32\n\tEndGlobalSection\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.ActiveCfg = Release|Win32\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.Build.0 = Release|Win32\n\tEndGlobalSection\n\tGlobalSection(SolutionProperties) = preSolution\n\t\tHideSolutionNode = FALSE\n\tEndGlobalSection\nEndGlobal\n"
  },
  {
    "path": "build/vs2013/test.vcxproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" ToolsVersion=\"12.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup Label=\"ProjectConfigurations\">\n    <ProjectConfiguration Include=\"Debug|Win32\">\n      <Configuration>Debug</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n    <ProjectConfiguration Include=\"Release|Win32\">\n      <Configuration>Release</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n  </ItemGroup>\n  <PropertyGroup Label=\"Globals\">\n    <ProjectGuid>{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}</ProjectGuid>\n    <RootNamespace>\n    </RootNamespace>\n    <Keyword>Win32Proj</Keyword>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.Default.props\" />\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n    <WholeProgramOptimization>true</WholeProgramOptimization>\n    <PlatformToolset>v120</PlatformToolset>\n  </PropertyGroup>\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n    <PlatformToolset>v120</PlatformToolset>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.props\" />\n  <ImportGroup Label=\"ExtensionSettings\">\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <PropertyGroup Label=\"UserMacros\" />\n  <PropertyGroup>\n    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">..\\..\\bin\\vs2013\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">.\\obj\\debug\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">true</LinkIncremental>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">..\\..\\bin\\vs2013\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">.\\obj\\release\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">false</LinkIncremental>\n  </PropertyGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">\n    <ClCompile>\n      <Optimization>Disabled</Optimization>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      
<PreprocessorDefinitions>WIN32;NOMINMAX;_DEBUG;_CONSOLE;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <MinimalRebuild>true</MinimalRebuild>\n      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\n      <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      <WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>EditAndContinue</DebugInformationFormat>\n    </ClCompile>\n    <Link>\n      <AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2013;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\debug\\debug.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">\n    <ClCompile>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      <PreprocessorDefinitions>WIN32;NOMINMAX;NDEBUG;_CONSOLE;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      <WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\n      <DisableSpecificWarnings>4244;4819;4996;%(DisableSpecificWarnings)</DisableSpecificWarnings>\n    </ClCompile>\n    <Link>\n      
<AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2013;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\release\\release.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <StackReserveSize>0</StackReserveSize>\n      <StackCommitSize>0</StackCommitSize>\n      <OptimizeReferences>true</OptimizeReferences>\n      <EnableCOMDATFolding>true</EnableCOMDATFolding>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\main.cpp\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\" />\n  </ItemGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.targets\" />\n  <ImportGroup Label=\"ExtensionTargets\">\n  </ImportGroup>\n</Project>"
  },
  {
    "path": "build/vs2013/test.vcxproj.filters",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup>\n    <Filter Include=\"Source Files\">\n      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\n      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\n    </Filter>\n    <Filter Include=\"Source Files\\ardrone\">\n      <UniqueIdentifier>{550b0fc4-8368-48d8-b626-03df6c7d9121}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\">\n      <UniqueIdentifier>{06e2d9e1-74fe-4d29-9e94-f3ea80429740}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\\ardrone\">\n      <UniqueIdentifier>{19ba6cc5-e937-4c72-8c33-8d3581893111}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Resource Files\">\n      <UniqueIdentifier>{4cceaaea-23b5-4465-aeec-3b00075be422}</UniqueIdentifier>\n    </Filter>\n  </ItemGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\main.cpp\">\n      <Filter>Source Files</Filter>\n    </ClCompile>\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\">\n      <Filter>Header Files\\ardrone</Filter>\n    </ClInclude>\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\">\n      <Filter>Resource Files</Filter>\n    </ResourceCompile>\n  </ItemGroup>\n</Project>"
  },
  {
    "path": "build/vs2015/test.sln",
    "content": "﻿\nMicrosoft Visual Studio Solution File, Format Version 12.00\n# Visual Studio 14\nVisualStudioVersion = 14.0.24720.0\nMinimumVisualStudioVersion = 10.0.40219.1\nProject(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"test\", \"test.vcxproj\", \"{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}\"\nEndProject\nGlobal\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\n\t\tRelease|Win32 = Release|Win32\n\tEndGlobalSection\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.ActiveCfg = Release|Win32\n\t\t{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}.Release|Win32.Build.0 = Release|Win32\n\tEndGlobalSection\n\tGlobalSection(SolutionProperties) = preSolution\n\t\tHideSolutionNode = FALSE\n\tEndGlobalSection\nEndGlobal\n"
  },
  {
    "path": "build/vs2015/test.vcxproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" ToolsVersion=\"14.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup Label=\"ProjectConfigurations\">\n    <ProjectConfiguration Include=\"Debug|Win32\">\n      <Configuration>Debug</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n    <ProjectConfiguration Include=\"Release|Win32\">\n      <Configuration>Release</Configuration>\n      <Platform>Win32</Platform>\n    </ProjectConfiguration>\n  </ItemGroup>\n  <PropertyGroup Label=\"Globals\">\n    <ProjectGuid>{81224E4A-C4B9-418E-B87E-224D1ACD2FF1}</ProjectGuid>\n    <RootNamespace>\n    </RootNamespace>\n    <Keyword>Win32Proj</Keyword>\n    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.Default.props\" />\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n    <WholeProgramOptimization>true</WholeProgramOptimization>\n    <PlatformToolset>v140</PlatformToolset>\n  </PropertyGroup>\n  <PropertyGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"Configuration\">\n    <ConfigurationType>Application</ConfigurationType>\n    <UseOfMfc>false</UseOfMfc>\n    <CharacterSet>MultiByte</CharacterSet>\n    <PlatformToolset>v140</PlatformToolset>\n  </PropertyGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.props\" />\n  <ImportGroup Label=\"ExtensionSettings\">\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <ImportGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\" Label=\"PropertySheets\">\n    <Import Project=\"$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props\" Condition=\"exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')\" Label=\"LocalAppDataPlatform\" />\n  </ImportGroup>\n  <PropertyGroup Label=\"UserMacros\" />\n  <PropertyGroup>\n    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">..\\..\\bin\\vs2015\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">.\\obj\\debug\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">true</LinkIncremental>\n    <OutDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">..\\..\\bin\\vs2015\\</OutDir>\n    <IntDir Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">.\\obj\\release\\</IntDir>\n    <LinkIncremental Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">false</LinkIncremental>\n  </PropertyGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Debug|Win32'\">\n    <ClCompile>\n      <Optimization>Disabled</Optimization>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      
<PreprocessorDefinitions>WIN32;NOMINMAX;_DEBUG;_CONSOLE;_TIMESPEC_DEFINED;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <MinimalRebuild>true</MinimalRebuild>\n      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>\n      <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      <WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>EditAndContinue</DebugInformationFormat>\n    </ClCompile>\n    <Link>\n      <AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2015;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\debug\\debug.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemDefinitionGroup Condition=\"'$(Configuration)|$(Platform)'=='Release|Win32'\">\n    <ClCompile>\n      <AdditionalIncludeDirectories>..\\..\\src\\3rdparty\\opencv\\include;..\\..\\src\\3rdparty\\ffmpeg\\include;..\\..\\src\\3rdparty\\pthread\\include;..\\..\\src\\3rdparty\\glut\\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n      <PreprocessorDefinitions>WIN32;NOMINMAX;NDEBUG;_CONSOLE;_TIMESPEC_DEFINED;__STDC_CONSTANT_MACROS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\n      <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\n      <PrecompiledHeader>\n      </PrecompiledHeader>\n      <WarningLevel>Level3</WarningLevel>\n      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\n      <DisableSpecificWarnings>4244;4819;4996;%(DisableSpecificWarnings)</DisableSpecificWarnings>\n    </ClCompile>\n    <Link>\n      
<AdditionalDependencies>wsock32.lib;opencv_aruco310.lib;opencv_bgsegm310.lib;opencv_bioinspired310.lib;opencv_calib3d310.lib;opencv_ccalib310.lib;opencv_core310.lib;opencv_datasets310.lib;opencv_dnn310.lib;opencv_dpm310.lib;opencv_face310.lib;opencv_features2d310.lib;opencv_flann310.lib;opencv_fuzzy310.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_line_descriptor310.lib;opencv_ml310.lib;opencv_objdetect310.lib;opencv_optflow310.lib;opencv_photo310.lib;opencv_plot310.lib;opencv_reg310.lib;opencv_rgbd310.lib;opencv_saliency310.lib;opencv_shape310.lib;opencv_stereo310.lib;opencv_stitching310.lib;opencv_structured_light310.lib;opencv_superres310.lib;opencv_surface_matching310.lib;opencv_text310.lib;opencv_tracking310.lib;opencv_video310.lib;opencv_videoio310.lib;opencv_videostab310.lib;opencv_xfeatures2d310.lib;opencv_ximgproc310.lib;opencv_xobjdetect310.lib;opencv_xphoto310.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;pthreadVC2.lib;glut32.lib;%(AdditionalDependencies)</AdditionalDependencies>\n      <AdditionalLibraryDirectories>..\\..\\src\\3rdparty\\opencv\\lib\\vs2015;..\\..\\src\\3rdparty\\ffmpeg\\lib;..\\..\\src\\3rdparty\\pthread\\lib;..\\..\\src\\3rdparty\\glut\\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n      <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>\n      <GenerateDebugInformation>true</GenerateDebugInformation>\n      <ProgramDatabaseFile>.\\obj\\release\\release.pdb</ProgramDatabaseFile>\n      <SubSystem>Console</SubSystem>\n      <StackReserveSize>0</StackReserveSize>\n      <StackCommitSize>0</StackCommitSize>\n      <OptimizeReferences>true</OptimizeReferences>\n      <EnableCOMDATFolding>true</EnableCOMDATFolding>\n      <TargetMachine>MachineX86</TargetMachine>\n    </Link>\n    <Manifest>\n      <AdditionalManifestFiles>..\\..\\src\\resource\\test.exe.manifest;%(AdditionalManifestFiles)</AdditionalManifestFiles>\n    </Manifest>\n  </ItemDefinitionGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\" />\n    <ClCompile Include=\"..\\..\\src\\main.cpp\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\" />\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\" />\n  </ItemGroup>\n  <Import Project=\"$(VCTargetsPath)\\Microsoft.Cpp.targets\" />\n  <ImportGroup Label=\"ExtensionTargets\">\n  </ImportGroup>\n</Project>"
  },
  {
    "path": "build/vs2015/test.vcxproj.filters",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n  <ItemGroup>\n    <Filter Include=\"Source Files\">\n      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\n      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\n    </Filter>\n    <Filter Include=\"Source Files\\ardrone\">\n      <UniqueIdentifier>{550b0fc4-8368-48d8-b626-03df6c7d9121}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\">\n      <UniqueIdentifier>{06e2d9e1-74fe-4d29-9e94-f3ea80429740}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Header Files\\ardrone\">\n      <UniqueIdentifier>{19ba6cc5-e937-4c72-8c33-8d3581893111}</UniqueIdentifier>\n    </Filter>\n    <Filter Include=\"Resource Files\">\n      <UniqueIdentifier>{4cceaaea-23b5-4465-aeec-3b00075be422}</UniqueIdentifier>\n    </Filter>\n  </ItemGroup>\n  <ItemGroup>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\ardrone.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\command.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\navdata.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\udp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\video.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\tcp.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\config.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\ardrone\\version.cpp\">\n      <Filter>Source Files\\ardrone</Filter>\n    </ClCompile>\n    <ClCompile Include=\"..\\..\\src\\main.cpp\">\n      <Filter>Source Files</Filter>\n    </ClCompile>\n  </ItemGroup>\n  <ItemGroup>\n    <ClInclude Include=\"..\\..\\src\\ardrone\\ardrone.h\">\n      <Filter>Header Files\\ardrone</Filter>\n    </ClInclude>\n  </ItemGroup>\n  <ItemGroup>\n    <ResourceCompile Include=\"..\\..\\src\\resource\\resource.rc\">\n      <Filter>Resource Files</Filter>\n    </ResourceCompile>\n  </ItemGroup>\n</Project>"
  },
  {
    "path": "cvdrone-license-BSD.txt",
    "content": "CV Drone (= OpenCV + AR.Drone)\r\nCopyright (C) 2013-2015 puku0x\r\n\r\nRedistribution and use in source and binary forms, with or without modification,\r\n are permitted provided that the following conditions are met:\r\n\r\n    * Redistributions of source code must retain the above copyright notice, this list of\r\n      conditions and the following disclaimer.\r\n      \r\n    * Redistributions in binary form must reproduce the above copyright notice, this list \r\n      of conditions and the following disclaimer in the documentation and/or other materials \r\n      provided with the distribution.\r\n      \r\n    * Neither the name of the \"CV Drone\" nor the names of its contributors may be used\r\n      to endorse or promote products derived from this software without specific prior\r\n      written permission.\r\n\r\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS\r\nOR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF \r\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. \r\nIN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\r\nINDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \r\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\r\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\r\n IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH \r\n DAMAGE."
  },
  {
    "path": "cvdrone-license-LGPL.txt",
    "content": "\t\t  GNU LESSER GENERAL PUBLIC LICENSE\n\t\t       Version 2.1, February 1999\n\n Copyright (C) 1991, 1999 Free Software Foundation, Inc.\n     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n[This is the first released version of the Lesser GPL.  It also counts\n as the successor of the GNU Library Public License, version 2, hence\n the version number 2.1.]\n\n\t\t\t    Preamble\n\n  The licenses for most software are designed to take away your\nfreedom to share and change it.  By contrast, the GNU General Public\nLicenses are intended to guarantee your freedom to share and change\nfree software--to make sure the software is free for all its users.\n\n  This license, the Lesser General Public License, applies to some\nspecially designated software packages--typically libraries--of the\nFree Software Foundation and other authors who decide to use it.  You\ncan use it too, but we suggest you first think carefully about whether\nthis license or the ordinary General Public License is the better\nstrategy to use in any particular case, based on the explanations below.\n\n  When we speak of free software, we are referring to freedom of use,\nnot price.  Our General Public Licenses are designed to make sure that\nyou have the freedom to distribute copies of free software (and charge\nfor this service if you wish); that you receive source code or can get\nit if you want it; that you can change the software and use pieces of\nit in new free programs; and that you are informed that you can do\nthese things.\n\n  To protect your rights, we need to make restrictions that forbid\ndistributors to deny you these rights or to ask you to surrender these\nrights.  These restrictions translate to certain responsibilities for\nyou if you distribute copies of the library or if you modify it.\n\n  For example, if you distribute copies of the library, whether gratis\nor for a fee, you must give the recipients all the rights that we gave\nyou.  You must make sure that they, too, receive or can get the source\ncode.  If you link other code with the library, you must provide\ncomplete object files to the recipients, so that they can relink them\nwith the library after making changes to the library and recompiling\nit.  And you must show them these terms so they know their rights.\n\n  We protect your rights with a two-step method: (1) we copyright the\nlibrary, and (2) we offer you this license, which gives you legal\npermission to copy, distribute and/or modify the library.\n\n  To protect each distributor, we want to make it very clear that\nthere is no warranty for the free library.  Also, if the library is\nmodified by someone else and passed on, the recipients should know\nthat what they have is not the original version, so that the original\nauthor's reputation will not be affected by problems that might be\nintroduced by others.\n\n  Finally, software patents pose a constant threat to the existence of\nany free program.  We wish to make sure that a company cannot\neffectively restrict the users of a free program by obtaining a\nrestrictive license from a patent holder.  Therefore, we insist that\nany patent license obtained for a version of the library must be\nconsistent with the full freedom of use specified in this license.\n\n  Most GNU software, including some libraries, is covered by the\nordinary GNU General Public License.  
This license, the GNU Lesser\nGeneral Public License, applies to certain designated libraries, and\nis quite different from the ordinary General Public License.  We use\nthis license for certain libraries in order to permit linking those\nlibraries into non-free programs.\n\n  When a program is linked with a library, whether statically or using\na shared library, the combination of the two is legally speaking a\ncombined work, a derivative of the original library.  The ordinary\nGeneral Public License therefore permits such linking only if the\nentire combination fits its criteria of freedom.  The Lesser General\nPublic License permits more lax criteria for linking other code with\nthe library.\n\n  We call this license the \"Lesser\" General Public License because it\ndoes Less to protect the user's freedom than the ordinary General\nPublic License.  It also provides other free software developers Less\nof an advantage over competing non-free programs.  These disadvantages\nare the reason we use the ordinary General Public License for many\nlibraries.  However, the Lesser license provides advantages in certain\nspecial circumstances.\n\n  For example, on rare occasions, there may be a special need to\nencourage the widest possible use of a certain library, so that it becomes\na de-facto standard.  To achieve this, non-free programs must be\nallowed to use the library.  A more frequent case is that a free\nlibrary does the same job as widely used non-free libraries.  In this\ncase, there is little to gain by limiting the free library to free\nsoftware only, so we use the Lesser General Public License.\n\n  In other cases, permission to use a particular library in non-free\nprograms enables a greater number of people to use a large body of\nfree software.  For example, permission to use the GNU C Library in\nnon-free programs enables many more people to use the whole GNU\noperating system, as well as its variant, the GNU/Linux operating\nsystem.\n\n  Although the Lesser General Public License is Less protective of the\nusers' freedom, it does ensure that the user of a program that is\nlinked with the Library has the freedom and the wherewithal to run\nthat program using a modified version of the Library.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.  Pay close attention to the difference between a\n\"work based on the library\" and a \"work that uses the library\".  The\nformer contains code derived from the library, whereas the latter must\nbe combined with the library in order to run.\n\n\t\t  GNU LESSER GENERAL PUBLIC LICENSE\n   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n  0. This License Agreement applies to any software library or other\nprogram which contains a notice placed by the copyright holder or\nother authorized party saying it may be distributed under the terms of\nthis Lesser General Public License (also called \"this License\").\nEach licensee is addressed as \"you\".\n\n  A \"library\" means a collection of software functions and/or data\nprepared so as to be conveniently linked with application programs\n(which use some of those functions and data) to form executables.\n\n  The \"Library\", below, refers to any such software library or work\nwhich has been distributed under these terms.  
A \"work based on the\nLibrary\" means either the Library or any derivative work under\ncopyright law: that is to say, a work containing the Library or a\nportion of it, either verbatim or with modifications and/or translated\nstraightforwardly into another language.  (Hereinafter, translation is\nincluded without limitation in the term \"modification\".)\n\n  \"Source code\" for a work means the preferred form of the work for\nmaking modifications to it.  For a library, complete source code means\nall the source code for all modules it contains, plus any associated\ninterface definition files, plus the scripts used to control compilation\nand installation of the library.\n\n  Activities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope.  The act of\nrunning a program using the Library is not restricted, and output from\nsuch a program is covered only if its contents constitute a work based\non the Library (independent of the use of the Library in a tool for\nwriting it).  Whether that is true depends on what the Library does\nand what the program that uses the Library does.\n  \n  1. You may copy and distribute verbatim copies of the Library's\ncomplete source code as you receive it, in any medium, provided that\nyou conspicuously and appropriately publish on each copy an\nappropriate copyright notice and disclaimer of warranty; keep intact\nall the notices that refer to this License and to the absence of any\nwarranty; and distribute a copy of this License along with the\nLibrary.\n\n  You may charge a fee for the physical act of transferring a copy,\nand you may at your option offer warranty protection in exchange for a\nfee.\n\n  2. You may modify your copy or copies of the Library or any portion\nof it, thus forming a work based on the Library, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n    a) The modified work must itself be a software library.\n\n    b) You must cause the files modified to carry prominent notices\n    stating that you changed the files and the date of any change.\n\n    c) You must cause the whole of the work to be licensed at no\n    charge to all third parties under the terms of this License.\n\n    d) If a facility in the modified Library refers to a function or a\n    table of data to be supplied by an application program that uses\n    the facility, other than as an argument passed when the facility\n    is invoked, then you must make a good faith effort to ensure that,\n    in the event an application does not supply such function or\n    table, the facility still operates, and performs whatever part of\n    its purpose remains meaningful.\n\n    (For example, a function in a library to compute square roots has\n    a purpose that is entirely well-defined independent of the\n    application.  Therefore, Subsection 2d requires that any\n    application-supplied function or table used by this function must\n    be optional: if the application does not supply it, the square\n    root function must still compute square roots.)\n\nThese requirements apply to the modified work as a whole.  If\nidentifiable sections of that work are not derived from the Library,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works.  
But when you\ndistribute the same sections as part of a whole which is a work based\non the Library, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote\nit.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Library.\n\nIn addition, mere aggregation of another work not based on the Library\nwith the Library (or with a work based on the Library) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n  3. You may opt to apply the terms of the ordinary GNU General Public\nLicense instead of this License to a given copy of the Library.  To do\nthis, you must alter all the notices that refer to this License, so\nthat they refer to the ordinary GNU General Public License, version 2,\ninstead of to this License.  (If a newer version than version 2 of the\nordinary GNU General Public License has appeared, then you can specify\nthat version instead if you wish.)  Do not make any other change in\nthese notices.\n\n  Once this change is made in a given copy, it is irreversible for\nthat copy, so the ordinary GNU General Public License applies to all\nsubsequent copies and derivative works made from that copy.\n\n  This option is useful when you wish to copy part of the code of\nthe Library into a program that is not a library.\n\n  4. You may copy and distribute the Library (or a portion or\nderivative of it, under Section 2) in object code or executable form\nunder the terms of Sections 1 and 2 above provided that you accompany\nit with the complete corresponding machine-readable source code, which\nmust be distributed under the terms of Sections 1 and 2 above on a\nmedium customarily used for software interchange.\n\n  If distribution of object code is made by offering access to copy\nfrom a designated place, then offering equivalent access to copy the\nsource code from the same place satisfies the requirement to\ndistribute the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n  5. A program that contains no derivative of any portion of the\nLibrary, but is designed to work with the Library by being compiled or\nlinked with it, is called a \"work that uses the Library\".  Such a\nwork, in isolation, is not a derivative work of the Library, and\ntherefore falls outside the scope of this License.\n\n  However, linking a \"work that uses the Library\" with the Library\ncreates an executable that is a derivative of the Library (because it\ncontains portions of the Library), rather than a \"work that uses the\nlibrary\".  The executable is therefore covered by this License.\nSection 6 states terms for distribution of such executables.\n\n  When a \"work that uses the Library\" uses material from a header file\nthat is part of the Library, the object code for the work may be a\nderivative work of the Library even though the source code is not.\nWhether this is true is especially significant if the work can be\nlinked without the Library, or if the work is itself a library.  
The\nthreshold for this to be true is not precisely defined by law.\n\n  If such an object file uses only numerical parameters, data\nstructure layouts and accessors, and small macros and small inline\nfunctions (ten lines or less in length), then the use of the object\nfile is unrestricted, regardless of whether it is legally a derivative\nwork.  (Executables containing this object code plus portions of the\nLibrary will still fall under Section 6.)\n\n  Otherwise, if the work is a derivative of the Library, you may\ndistribute the object code for the work under the terms of Section 6.\nAny executables containing that work also fall under Section 6,\nwhether or not they are linked directly with the Library itself.\n\n  6. As an exception to the Sections above, you may also combine or\nlink a \"work that uses the Library\" with the Library to produce a\nwork containing portions of the Library, and distribute that work\nunder terms of your choice, provided that the terms permit\nmodification of the work for the customer's own use and reverse\nengineering for debugging such modifications.\n\n  You must give prominent notice with each copy of the work that the\nLibrary is used in it and that the Library and its use are covered by\nthis License.  You must supply a copy of this License.  If the work\nduring execution displays copyright notices, you must include the\ncopyright notice for the Library among them, as well as a reference\ndirecting the user to the copy of this License.  Also, you must do one\nof these things:\n\n    a) Accompany the work with the complete corresponding\n    machine-readable source code for the Library including whatever\n    changes were used in the work (which must be distributed under\n    Sections 1 and 2 above); and, if the work is an executable linked\n    with the Library, with the complete machine-readable \"work that\n    uses the Library\", as object code and/or source code, so that the\n    user can modify the Library and then relink to produce a modified\n    executable containing the modified Library.  (It is understood\n    that the user who changes the contents of definitions files in the\n    Library will not necessarily be able to recompile the application\n    to use the modified definitions.)\n\n    b) Use a suitable shared library mechanism for linking with the\n    Library.  A suitable mechanism is one that (1) uses at run time a\n    copy of the library already present on the user's computer system,\n    rather than copying library functions into the executable, and (2)\n    will operate properly with a modified version of the library, if\n    the user installs one, as long as the modified version is\n    interface-compatible with the version that the work was made with.\n\n    c) Accompany the work with a written offer, valid for at\n    least three years, to give the same user the materials\n    specified in Subsection 6a, above, for a charge no more\n    than the cost of performing this distribution.\n\n    d) If distribution of the work is made by offering access to copy\n    from a designated place, offer equivalent access to copy the above\n    specified materials from the same place.\n\n    e) Verify that the user has already received a copy of these\n    materials or that you have already sent this user a copy.\n\n  For an executable, the required form of the \"work that uses the\nLibrary\" must include any data and utility programs needed for\nreproducing the executable from it.  
However, as a special exception,\nthe materials to be distributed need not include anything that is\nnormally distributed (in either source or binary form) with the major\ncomponents (compiler, kernel, and so on) of the operating system on\nwhich the executable runs, unless that component itself accompanies\nthe executable.\n\n  It may happen that this requirement contradicts the license\nrestrictions of other proprietary libraries that do not normally\naccompany the operating system.  Such a contradiction means you cannot\nuse both them and the Library together in an executable that you\ndistribute.\n\f\n  7. You may place library facilities that are a work based on the\nLibrary side-by-side in a single library together with other library\nfacilities not covered by this License, and distribute such a combined\nlibrary, provided that the separate distribution of the work based on\nthe Library and of the other library facilities is otherwise\npermitted, and provided that you do these two things:\n\n    a) Accompany the combined library with a copy of the same work\n    based on the Library, uncombined with any other library\n    facilities.  This must be distributed under the terms of the\n    Sections above.\n\n    b) Give prominent notice with the combined library of the fact\n    that part of it is a work based on the Library, and explaining\n    where to find the accompanying uncombined form of the same work.\n\n  8. You may not copy, modify, sublicense, link with, or distribute\nthe Library except as expressly provided under this License.  Any\nattempt otherwise to copy, modify, sublicense, link with, or\ndistribute the Library is void, and will automatically terminate your\nrights under this License.  However, parties who have received copies,\nor rights, from you under this License will not have their licenses\nterminated so long as such parties remain in full compliance.\n\n  9. You are not required to accept this License, since you have not\nsigned it.  However, nothing else grants you permission to modify or\ndistribute the Library or its derivative works.  These actions are\nprohibited by law if you do not accept this License.  Therefore, by\nmodifying or distributing the Library (or any work based on the\nLibrary), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Library or works based on it.\n\n  10. Each time you redistribute the Library (or any work based on the\nLibrary), the recipient automatically receives a license from the\noriginal licensor to copy, distribute, link with or modify the Library\nsubject to these terms and conditions.  You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties with\nthis License.\n\f\n  11. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Library at all.  
For example, if a patent\nlicense would not permit royalty-free redistribution of the Library by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Library.\n\nIf any portion of this section is held invalid or unenforceable under any\nparticular circumstance, the balance of the section is intended to apply,\nand the section as a whole is intended to apply in other circumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system which is\nimplemented by public license practices.  Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n  12. If the distribution and/or use of the Library is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Library under this License may add\nan explicit geographical distribution limitation excluding those countries,\nso that distribution is permitted only in or among countries not thus\nexcluded.  In such case, this License incorporates the limitation as if\nwritten in the body of this License.\n\n  13. The Free Software Foundation may publish revised and/or new\nversions of the Lesser General Public License from time to time.\nSuch new versions will be similar in spirit to the present version,\nbut may differ in detail to address new problems or concerns.\n\nEach version is given a distinguishing version number.  If the Library\nspecifies a version number of this License which applies to it and\n\"any later version\", you have the option of following the terms and\nconditions either of that version or of any later version published by\nthe Free Software Foundation.  If the Library does not specify a\nlicense version number, you may choose any version ever published by\nthe Free Software Foundation.\n\n  14. If you wish to incorporate parts of the Library into other free\nprograms whose distribution conditions are incompatible with these,\nwrite to the author to ask for permission.  For software which is\ncopyrighted by the Free Software Foundation, write to the Free\nSoftware Foundation; we sometimes make exceptions for this.  Our\ndecision will be guided by the two goals of preserving the free status\nof all derivatives of our free software and of promoting the sharing\nand reuse of software generally.\n\n\t\t\t    NO WARRANTY\n\n  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO\nWARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.\nEXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR\nOTHER PARTIES PROVIDE THE LIBRARY \"AS IS\" WITHOUT WARRANTY OF ANY\nKIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE\nLIBRARY IS WITH YOU.  
SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME\nTHE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN\nWRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY\nAND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU\nFOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR\nCONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE\nLIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING\nRENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A\nFAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF\nSUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGES.\n\n\t\t     END OF TERMS AND CONDITIONS\n\n           How to Apply These Terms to Your New Libraries\n\n  If you develop a new library, and you want it to be of the greatest\npossible use to the public, we recommend making it free software that\neveryone can redistribute and change.  You can do so by permitting\nredistribution under these terms (or, alternatively, under the terms of the\nordinary General Public License).\n\n  To apply these terms, attach the following notices to the library.  It is\nsafest to attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least the\n\"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the library's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This library is free software; you can redistribute it and/or\n    modify it under the terms of the GNU Lesser General Public\n    License as published by the Free Software Foundation; either\n    version 2.1 of the License, or (at your option) any later version.\n\n    This library is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n    Lesser General Public License for more details.\n\n    You should have received a copy of the GNU Lesser General Public\n    License along with this library; if not, write to the Free Software\n    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\n\nAlso add information on how to contact you by electronic and paper mail.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the library, if\nnecessary.  Here is a sample; alter the names:\n\n  Yoyodyne, Inc., hereby disclaims all copyright interest in the\n  library `Frob' (a library for tweaking knobs) written by James Random Hacker.\n\n  <signature of Ty Coon>, 1 April 1990\n  Ty Coon, President of Vice\n\nThat's all there is to it!\n"
  },
  {
    "path": "licenses/FFmpeg-LGPLv2.1.txt",
    "content": "                  GNU LESSER GENERAL PUBLIC LICENSE\n                       Version 2.1, February 1999\n\n Copyright (C) 1991, 1999 Free Software Foundation, Inc.\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n[This is the first released version of the Lesser GPL.  It also counts\n as the successor of the GNU Library Public License, version 2, hence\n the version number 2.1.]\n\n                            Preamble\n\n  The licenses for most software are designed to take away your\nfreedom to share and change it.  By contrast, the GNU General Public\nLicenses are intended to guarantee your freedom to share and change\nfree software--to make sure the software is free for all its users.\n\n  This license, the Lesser General Public License, applies to some\nspecially designated software packages--typically libraries--of the\nFree Software Foundation and other authors who decide to use it.  You\ncan use it too, but we suggest you first think carefully about whether\nthis license or the ordinary General Public License is the better\nstrategy to use in any particular case, based on the explanations below.\n\n  When we speak of free software, we are referring to freedom of use,\nnot price.  Our General Public Licenses are designed to make sure that\nyou have the freedom to distribute copies of free software (and charge\nfor this service if you wish); that you receive source code or can get\nit if you want it; that you can change the software and use pieces of\nit in new free programs; and that you are informed that you can do\nthese things.\n\n  To protect your rights, we need to make restrictions that forbid\ndistributors to deny you these rights or to ask you to surrender these\nrights.  These restrictions translate to certain responsibilities for\nyou if you distribute copies of the library or if you modify it.\n\n  For example, if you distribute copies of the library, whether gratis\nor for a fee, you must give the recipients all the rights that we gave\nyou.  You must make sure that they, too, receive or can get the source\ncode.  If you link other code with the library, you must provide\ncomplete object files to the recipients, so that they can relink them\nwith the library after making changes to the library and recompiling\nit.  And you must show them these terms so they know their rights.\n\n  We protect your rights with a two-step method: (1) we copyright the\nlibrary, and (2) we offer you this license, which gives you legal\npermission to copy, distribute and/or modify the library.\n\n  To protect each distributor, we want to make it very clear that\nthere is no warranty for the free library.  Also, if the library is\nmodified by someone else and passed on, the recipients should know\nthat what they have is not the original version, so that the original\nauthor's reputation will not be affected by problems that might be\nintroduced by others.\n\f\n  Finally, software patents pose a constant threat to the existence of\nany free program.  We wish to make sure that a company cannot\neffectively restrict the users of a free program by obtaining a\nrestrictive license from a patent holder.  
Therefore, we insist that\nany patent license obtained for a version of the library must be\nconsistent with the full freedom of use specified in this license.\n\n  Most GNU software, including some libraries, is covered by the\nordinary GNU General Public License.  This license, the GNU Lesser\nGeneral Public License, applies to certain designated libraries, and\nis quite different from the ordinary General Public License.  We use\nthis license for certain libraries in order to permit linking those\nlibraries into non-free programs.\n\n  When a program is linked with a library, whether statically or using\na shared library, the combination of the two is legally speaking a\ncombined work, a derivative of the original library.  The ordinary\nGeneral Public License therefore permits such linking only if the\nentire combination fits its criteria of freedom.  The Lesser General\nPublic License permits more lax criteria for linking other code with\nthe library.\n\n  We call this license the \"Lesser\" General Public License because it\ndoes Less to protect the user's freedom than the ordinary General\nPublic License.  It also provides other free software developers Less\nof an advantage over competing non-free programs.  These disadvantages\nare the reason we use the ordinary General Public License for many\nlibraries.  However, the Lesser license provides advantages in certain\nspecial circumstances.\n\n  For example, on rare occasions, there may be a special need to\nencourage the widest possible use of a certain library, so that it becomes\na de-facto standard.  To achieve this, non-free programs must be\nallowed to use the library.  A more frequent case is that a free\nlibrary does the same job as widely used non-free libraries.  In this\ncase, there is little to gain by limiting the free library to free\nsoftware only, so we use the Lesser General Public License.\n\n  In other cases, permission to use a particular library in non-free\nprograms enables a greater number of people to use a large body of\nfree software.  For example, permission to use the GNU C Library in\nnon-free programs enables many more people to use the whole GNU\noperating system, as well as its variant, the GNU/Linux operating\nsystem.\n\n  Although the Lesser General Public License is Less protective of the\nusers' freedom, it does ensure that the user of a program that is\nlinked with the Library has the freedom and the wherewithal to run\nthat program using a modified version of the Library.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.  Pay close attention to the difference between a\n\"work based on the library\" and a \"work that uses the library\".  The\nformer contains code derived from the library, whereas the latter must\nbe combined with the library in order to run.\n\f\n                  GNU LESSER GENERAL PUBLIC LICENSE\n   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n  0. 
This License Agreement applies to any software library or other\nprogram which contains a notice placed by the copyright holder or\nother authorized party saying it may be distributed under the terms of\nthis Lesser General Public License (also called \"this License\").\nEach licensee is addressed as \"you\".\n\n  A \"library\" means a collection of software functions and/or data\nprepared so as to be conveniently linked with application programs\n(which use some of those functions and data) to form executables.\n\n  The \"Library\", below, refers to any such software library or work\nwhich has been distributed under these terms.  A \"work based on the\nLibrary\" means either the Library or any derivative work under\ncopyright law: that is to say, a work containing the Library or a\nportion of it, either verbatim or with modifications and/or translated\nstraightforwardly into another language.  (Hereinafter, translation is\nincluded without limitation in the term \"modification\".)\n\n  \"Source code\" for a work means the preferred form of the work for\nmaking modifications to it.  For a library, complete source code means\nall the source code for all modules it contains, plus any associated\ninterface definition files, plus the scripts used to control compilation\nand installation of the library.\n\n  Activities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope.  The act of\nrunning a program using the Library is not restricted, and output from\nsuch a program is covered only if its contents constitute a work based\non the Library (independent of the use of the Library in a tool for\nwriting it).  Whether that is true depends on what the Library does\nand what the program that uses the Library does.\n\n  1. You may copy and distribute verbatim copies of the Library's\ncomplete source code as you receive it, in any medium, provided that\nyou conspicuously and appropriately publish on each copy an\nappropriate copyright notice and disclaimer of warranty; keep intact\nall the notices that refer to this License and to the absence of any\nwarranty; and distribute a copy of this License along with the\nLibrary.\n\n  You may charge a fee for the physical act of transferring a copy,\nand you may at your option offer warranty protection in exchange for a\nfee.\n\f\n  2. 
You may modify your copy or copies of the Library or any portion\nof it, thus forming a work based on the Library, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n    a) The modified work must itself be a software library.\n\n    b) You must cause the files modified to carry prominent notices\n    stating that you changed the files and the date of any change.\n\n    c) You must cause the whole of the work to be licensed at no\n    charge to all third parties under the terms of this License.\n\n    d) If a facility in the modified Library refers to a function or a\n    table of data to be supplied by an application program that uses\n    the facility, other than as an argument passed when the facility\n    is invoked, then you must make a good faith effort to ensure that,\n    in the event an application does not supply such function or\n    table, the facility still operates, and performs whatever part of\n    its purpose remains meaningful.\n\n    (For example, a function in a library to compute square roots has\n    a purpose that is entirely well-defined independent of the\n    application.  Therefore, Subsection 2d requires that any\n    application-supplied function or table used by this function must\n    be optional: if the application does not supply it, the square\n    root function must still compute square roots.)\n\nThese requirements apply to the modified work as a whole.  If\nidentifiable sections of that work are not derived from the Library,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works.  But when you\ndistribute the same sections as part of a whole which is a work based\non the Library, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote\nit.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Library.\n\nIn addition, mere aggregation of another work not based on the Library\nwith the Library (or with a work based on the Library) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n  3. You may opt to apply the terms of the ordinary GNU General Public\nLicense instead of this License to a given copy of the Library.  To do\nthis, you must alter all the notices that refer to this License, so\nthat they refer to the ordinary GNU General Public License, version 2,\ninstead of to this License.  (If a newer version than version 2 of the\nordinary GNU General Public License has appeared, then you can specify\nthat version instead if you wish.)  Do not make any other change in\nthese notices.\n\f\n  Once this change is made in a given copy, it is irreversible for\nthat copy, so the ordinary GNU General Public License applies to all\nsubsequent copies and derivative works made from that copy.\n\n  This option is useful when you wish to copy part of the code of\nthe Library into a program that is not a library.\n\n  4. 
You may copy and distribute the Library (or a portion or\nderivative of it, under Section 2) in object code or executable form\nunder the terms of Sections 1 and 2 above provided that you accompany\nit with the complete corresponding machine-readable source code, which\nmust be distributed under the terms of Sections 1 and 2 above on a\nmedium customarily used for software interchange.\n\n  If distribution of object code is made by offering access to copy\nfrom a designated place, then offering equivalent access to copy the\nsource code from the same place satisfies the requirement to\ndistribute the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n  5. A program that contains no derivative of any portion of the\nLibrary, but is designed to work with the Library by being compiled or\nlinked with it, is called a \"work that uses the Library\".  Such a\nwork, in isolation, is not a derivative work of the Library, and\ntherefore falls outside the scope of this License.\n\n  However, linking a \"work that uses the Library\" with the Library\ncreates an executable that is a derivative of the Library (because it\ncontains portions of the Library), rather than a \"work that uses the\nlibrary\".  The executable is therefore covered by this License.\nSection 6 states terms for distribution of such executables.\n\n  When a \"work that uses the Library\" uses material from a header file\nthat is part of the Library, the object code for the work may be a\nderivative work of the Library even though the source code is not.\nWhether this is true is especially significant if the work can be\nlinked without the Library, or if the work is itself a library.  The\nthreshold for this to be true is not precisely defined by law.\n\n  If such an object file uses only numerical parameters, data\nstructure layouts and accessors, and small macros and small inline\nfunctions (ten lines or less in length), then the use of the object\nfile is unrestricted, regardless of whether it is legally a derivative\nwork.  (Executables containing this object code plus portions of the\nLibrary will still fall under Section 6.)\n\n  Otherwise, if the work is a derivative of the Library, you may\ndistribute the object code for the work under the terms of Section 6.\nAny executables containing that work also fall under Section 6,\nwhether or not they are linked directly with the Library itself.\n\f\n  6. As an exception to the Sections above, you may also combine or\nlink a \"work that uses the Library\" with the Library to produce a\nwork containing portions of the Library, and distribute that work\nunder terms of your choice, provided that the terms permit\nmodification of the work for the customer's own use and reverse\nengineering for debugging such modifications.\n\n  You must give prominent notice with each copy of the work that the\nLibrary is used in it and that the Library and its use are covered by\nthis License.  You must supply a copy of this License.  If the work\nduring execution displays copyright notices, you must include the\ncopyright notice for the Library among them, as well as a reference\ndirecting the user to the copy of this License.  
Also, you must do one\nof these things:\n\n    a) Accompany the work with the complete corresponding\n    machine-readable source code for the Library including whatever\n    changes were used in the work (which must be distributed under\n    Sections 1 and 2 above); and, if the work is an executable linked\n    with the Library, with the complete machine-readable \"work that\n    uses the Library\", as object code and/or source code, so that the\n    user can modify the Library and then relink to produce a modified\n    executable containing the modified Library.  (It is understood\n    that the user who changes the contents of definitions files in the\n    Library will not necessarily be able to recompile the application\n    to use the modified definitions.)\n\n    b) Use a suitable shared library mechanism for linking with the\n    Library.  A suitable mechanism is one that (1) uses at run time a\n    copy of the library already present on the user's computer system,\n    rather than copying library functions into the executable, and (2)\n    will operate properly with a modified version of the library, if\n    the user installs one, as long as the modified version is\n    interface-compatible with the version that the work was made with.\n\n    c) Accompany the work with a written offer, valid for at\n    least three years, to give the same user the materials\n    specified in Subsection 6a, above, for a charge no more\n    than the cost of performing this distribution.\n\n    d) If distribution of the work is made by offering access to copy\n    from a designated place, offer equivalent access to copy the above\n    specified materials from the same place.\n\n    e) Verify that the user has already received a copy of these\n    materials or that you have already sent this user a copy.\n\n  For an executable, the required form of the \"work that uses the\nLibrary\" must include any data and utility programs needed for\nreproducing the executable from it.  However, as a special exception,\nthe materials to be distributed need not include anything that is\nnormally distributed (in either source or binary form) with the major\ncomponents (compiler, kernel, and so on) of the operating system on\nwhich the executable runs, unless that component itself accompanies\nthe executable.\n\n  It may happen that this requirement contradicts the license\nrestrictions of other proprietary libraries that do not normally\naccompany the operating system.  Such a contradiction means you cannot\nuse both them and the Library together in an executable that you\ndistribute.\n\f\n  7. You may place library facilities that are a work based on the\nLibrary side-by-side in a single library together with other library\nfacilities not covered by this License, and distribute such a combined\nlibrary, provided that the separate distribution of the work based on\nthe Library and of the other library facilities is otherwise\npermitted, and provided that you do these two things:\n\n    a) Accompany the combined library with a copy of the same work\n    based on the Library, uncombined with any other library\n    facilities.  This must be distributed under the terms of the\n    Sections above.\n\n    b) Give prominent notice with the combined library of the fact\n    that part of it is a work based on the Library, and explaining\n    where to find the accompanying uncombined form of the same work.\n\n  8. 
You may not copy, modify, sublicense, link with, or distribute\nthe Library except as expressly provided under this License.  Any\nattempt otherwise to copy, modify, sublicense, link with, or\ndistribute the Library is void, and will automatically terminate your\nrights under this License.  However, parties who have received copies,\nor rights, from you under this License will not have their licenses\nterminated so long as such parties remain in full compliance.\n\n  9. You are not required to accept this License, since you have not\nsigned it.  However, nothing else grants you permission to modify or\ndistribute the Library or its derivative works.  These actions are\nprohibited by law if you do not accept this License.  Therefore, by\nmodifying or distributing the Library (or any work based on the\nLibrary), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Library or works based on it.\n\n  10. Each time you redistribute the Library (or any work based on the\nLibrary), the recipient automatically receives a license from the\noriginal licensor to copy, distribute, link with or modify the Library\nsubject to these terms and conditions.  You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties with\nthis License.\n\f\n  11. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Library at all.  For example, if a patent\nlicense would not permit royalty-free redistribution of the Library by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Library.\n\nIf any portion of this section is held invalid or unenforceable under any\nparticular circumstance, the balance of the section is intended to apply,\nand the section as a whole is intended to apply in other circumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system which is\nimplemented by public license practices.  Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n  12. 
If the distribution and/or use of the Library is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Library under this License may add\nan explicit geographical distribution limitation excluding those countries,\nso that distribution is permitted only in or among countries not thus\nexcluded.  In such case, this License incorporates the limitation as if\nwritten in the body of this License.\n\n  13. The Free Software Foundation may publish revised and/or new\nversions of the Lesser General Public License from time to time.\nSuch new versions will be similar in spirit to the present version,\nbut may differ in detail to address new problems or concerns.\n\nEach version is given a distinguishing version number.  If the Library\nspecifies a version number of this License which applies to it and\n\"any later version\", you have the option of following the terms and\nconditions either of that version or of any later version published by\nthe Free Software Foundation.  If the Library does not specify a\nlicense version number, you may choose any version ever published by\nthe Free Software Foundation.\n\f\n  14. If you wish to incorporate parts of the Library into other free\nprograms whose distribution conditions are incompatible with these,\nwrite to the author to ask for permission.  For software which is\ncopyrighted by the Free Software Foundation, write to the Free\nSoftware Foundation; we sometimes make exceptions for this.  Our\ndecision will be guided by the two goals of preserving the free status\nof all derivatives of our free software and of promoting the sharing\nand reuse of software generally.\n\n                            NO WARRANTY\n\n  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO\nWARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.\nEXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR\nOTHER PARTIES PROVIDE THE LIBRARY \"AS IS\" WITHOUT WARRANTY OF ANY\nKIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE\nLIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME\nTHE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN\nWRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY\nAND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU\nFOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR\nCONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE\nLIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING\nRENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A\nFAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF\nSUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGES.\n\n                     END OF TERMS AND CONDITIONS\n\f\n           How to Apply These Terms to Your New Libraries\n\n  If you develop a new library, and you want it to be of the greatest\npossible use to the public, we recommend making it free software that\neveryone can redistribute and change.  You can do so by permitting\nredistribution under these terms (or, alternatively, under the terms of the\nordinary General Public License).\n\n  To apply these terms, attach the following notices to the library.  
It is\nsafest to attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least the\n\"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the library's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This library is free software; you can redistribute it and/or\n    modify it under the terms of the GNU Lesser General Public\n    License as published by the Free Software Foundation; either\n    version 2.1 of the License, or (at your option) any later version.\n\n    This library is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n    Lesser General Public License for more details.\n\n    You should have received a copy of the GNU Lesser General Public\n    License along with this library; if not, write to the Free Software\n    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n\nAlso add information on how to contact you by electronic and paper mail.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the library, if\nnecessary.  Here is a sample; alter the names:\n\n  Yoyodyne, Inc., hereby disclaims all copyright interest in the\n  library `Frob' (a library for tweaking knobs) written by James Random Hacker.\n\n  <signature of Ty Coon>, 1 April 1990\n  Ty Coon, President of Vice\n\nThat's all there is to it!\n"
  },
  {
    "path": "licenses/ffmpeg.txt",
    "content": "This is a FFmpeg Win32 Shared build by puku0x.\n\nffmpeg version 2.2.1 Copyright (c) 2000-2014 the FFmpeg developers\n  built on Apr 17 2014 00:33:55 with gcc 4.7.2 (GCC)\n  configuration: --enable-shared --disable-gpl\n  libavutil      52. 66.100 / 52. 66.100\n  libavcodec     55. 52.102 / 55. 52.102\n  libavformat    55. 33.100 / 55. 33.100\n  libavdevice    55. 10.100 / 55. 10.100\n  libavfilter     4.  2.100 /  4.  2.100\n  libswscale      2.  5.102 /  2.  5.102\n  libswresample   0. 18.100 /  0. 18.100\n\nffmpeg is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense as published by the Free Software Foundation; either\nversion 2.1 of the License, or (at your option) any later version.\n\nffmpeg is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with ffmpeg; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n"
  },
  {
    "path": "licenses/glut.txt",
    "content": "\n\n                GLUT for Win32 README\n                ---------------------\n\n\nVERSION/INFO:\n\n    This is GLUT for Win32 version 3.7.6 as of Nov 8th 2001.\n    See the COPYRIGHT section for distribution and copyright notices.\n    Send all bug reports and questions for this version of GLUT to \n    Nate Robins [nate@pobox.com].\n\n    For more information about GLUT for Win32, see the web page:\n    www.pobox.com/~nate/glut.html or subscribe to the GLUT for Win32 \n    mailing list by sending e-mail to majordomo@perp.com with \n    \"subscribe glut\" in the body of the message.\n\n    For general information about GLUT, see the GLUT web page:\n    http://reality.sgi.com/opengl/glut3/glut3.html and be sure to\n    check the GLUT FAQ first for any questions that you may have:\n    http://reality.sgi.com/opengl/glut3/glut-faq.html\n\n\nCOMPILING/INSTALLATION:\n\n    o  Precompiled versions of the DLL and import library can be\n       found on the GLUT for Win32 web page mentioned above.\n\n    o  Microsoft Developer Studio 6 workspace and project files have\n       been included in the source code distribution.\n       \n       To build the glut dll: \n       First, open Microsoft Developer Studio.\n       Then, select File -> Open Workspace and find the glut.dsw file\n       in the file dialog and double-click on it.  \n       Finally, select Build -> Build glut32.dll.\n       When the build is finished, it will copy:\n       glut32.dll to %WinDir%\\System, \n       glut32.lib to $(MSDevDir)\\..\\..\\VC98\\lib, and \n       glut.h     to $(MSDevDir)\\..\\..\\VC98\\include\\GL.\n       \n       Additional workspace files have been included in the progs, test\n       and lib directories to build the progs, tests and libs respectively.\n\n\nBORLAND NOTES:\n\n    From what I understand, Borland supplies a utility that\n    converts Microsoft Visual C++ .libs into Borland compatible\n    files.  Therefore, the best method for Borland users is\n    probably to get the precompiled versions of the library and\n    convert the library.  To create an import library for Borland \n    from the DLLs, use the following command (from a command prompt):\n          IMPLIB glut32.lib glut32.dll\n    If IMPLIB crashes when called this way, try\n          IMPLIB glut32.lib glut32.def\n    using the glut32.def file in this distribution.\n\n\nFORTRAN NOTES:\n\n    Bill Mitchell [william.mitchell@nist.gov] has put considerable\n    effort into getting GLUT to work with different compilers for\n    Fortran 90.  He indicates that you should copy the f90glut.h\n    file to your $(MSDevDir)\\..\\..\\VC98\\include\\GL directory.  \n    Then, just build GLUT as usual.  The Fortran 90 interface, f90gl, \n    can be obtained at http://math.nist.gov/f90gl and contains \n    installation instructions and usage examples.\n\n\nMISC NOTES:\n\n    o  Overlay support is not implemented, nor are there any plans to \n       implement it in the near future.\n\n    o  To customize the windows icon, you can use the resource name\n       GLUT_ICON.  
For example, create an icon named \"glut.ico\", and\n       create a file called glut.rc that contains the following:\n       GLUT_ICON ICON glut.ico\n       then compile the glut.rc file with the following:\n       rc /r glut\n       and link the resulting glut.res file into your executable\n       (just like you would an object file).\n       Alternatively, you can simply add the glut.rc file to your\n       project if you are using Microsoft Developer Studio.\n\n\nIMPLEMENTATION DEPENDENT DIFFERENCES:\n\n    There are a few differences between the Win32 version of GLUT\n    and the X11 version of GLUT.  Those are outlined here.  Note\n    that MOST of these differences are allowed by the GLUT\n    specification.  Bugs and unsupported features are outlined in\n    the UNSUPPORTED/BUGS section.\n\n    o  glutInit:\n       The following command line options have no meaning (and are\n       ignored) in GLUT for Win32:\n       -display, -indirect, -direct, -sync.\n\n    o  glutInitWindowPosition, glutPositionWindow:\n       Win32 has two different coordinate systems for windows.\n       One is in terms of client space and the other is the whole\n       window space (including the decorations).  If you\n       glutPositionWindow(0, 0), GLUT for Win32 will place the\n       window CLIENT area at 0, 0.  This will cause the window\n       decorations (title bar and left edge) to be OFF-SCREEN, but\n       it gives the user the most flexibility in positioning.\n       HOWEVER, if the user specifies glutInitWindowPosition(0, 0),\n       the window is placed relative to window space at 0, 0.\n       This will cause the window to be opened in the upper left\n       corner with all the decorations showing.  This behaviour is\n       acceptable under the current GLUT specification.\n\n    o  glutSetIconTitle, glutSetWindowTitle:\n       There is no separation between Icon title and Window title\n       in Win32.  Therefore, setting an icon title in Win32 has\n       no effect.\n\n    o  glutSetCursor:\n       As indicated in the GLUT specification, cursors may be\n       different on different platforms.  This is the case in GLUT\n       for Win32.  For the most part, the cursors will match the\n       meaning, but not necessarily the shape.  Notable exceptions\n       are the GLUT_CURSOR_INFO & GLUT_CURSOR_SPRAY which use the\n       crosshair cursor and the GLUT_CURSOR_CYCLE which uses the\n       'no' or 'destroy' cursor in Win32.\n\n    o  glutVisibilityFunc:\n       Win32 seems to be unable to determine if a window is fully\n       obscured.  Therefore, the visibility of a GLUT window is\n       only reflected by its Iconic, Hidden or Shown state.  That\n       is, even if a window is fully obscured, in GLUT for Win32,\n       it is still \"visible\".\n\n    o  glutEntryFunc:\n       Window Focus is handled differently in Win32 and X.\n       Specifically, the \"window manager\" in Win32 uses a \"click to\n       focus\" policy.  That is, in order for a window to receive\n       focus, a mouse button must be clicked in it.  Likewise, in\n       order for a window to lose focus, a mouse button must be\n       clicked outside the window (or in another window).\n       Therefore, the Enter and Leave notification provided by GLUT\n       may behave differently in the Win32 and in X11 versions.\n       There is a viable workaround for this.  
A program called\n       \"Tweak UI\" is provided by Microsoft which can be used to\n       change the focus policy in Win32 to \"focus follows mouse\".\n       It is available from the Microsoft Web Pages:\n       http://www.microsoft.com/windows/software/PowerToy.htm\n\n    o  glutCopyColormap:\n       GLUT for Win32 always copies the colormap.  There is never\n       any sharing of colormaps.  This is probably okay, since\n       Win32 merges the logical palette and the physical palette\n       anyway, so even if there are two windows with totally\n       different colors in their colormaps, Win32 will find a\n       (hopefully) good match between them.\n\n    o  glutIdleFunc + menus:\n       The glut idle function will NOT be called when a menu is\n       active.  This causes all animation to stop when a menu is\n       active (in general, this is probably okay).  Timer\n       functions will still fire, however.  If the timer callback\n       draws into the rendering context, the drawing will not show\n       up until after the menu has finished, though.\n\n\nUNSUPPORTED/BUGS:\n\n    o  glutAttachMenu:\n       Win32 only likes to work with left and right mouse buttons.\n       Especially so with popup menus.  Therefore, when attaching\n       the menu to the middle mouse button, the LEFT mouse button\n       must be used to select from the menu.\n\n    o  glutSpaceball*, glutButtonBox*, glutTablet*, glutDials*:\n       None of the special input devices are supported at this\n       time.\n\n    o  When resizing or moving a GLUT for Win32 window, no updating\n       is performed.  This causes the window to leave \"tracks\" on\n       the screen when getting bigger or when previously obscured\n       parts are being revealed.  I put in a bit of a kludgy\n       workaround for those that absolutely can't have the weird\n       lines.  The reshape callback is called multiple times for\n       reshapes.  Therefore, in the reshape callback, some drawing\n       can be done.  It should probably be limited to a color buffer \n       clear.\n\n    o  The video resizing capabilities of GLUT 3.3+ for X11 are\n       currently unimplemented (this is probably ok, since it\n       really isn't part of the spec until 4.0).  I doubt that\n       this will ever be part of GLUT for Win32, since there is no\n       hardware to support it.  A hack could simply change the\n       resolution of the desktop.\n\n\nCHANGES/FIXES:\n\n    (Nov 8, '01)\n    x  Released 3.7.6\n\n    (Nov 8, '01)\n    x  Changed fullscreen mode from TOPMOST back to simply TOP, since \n       (it turns out) many people use windows atop a GLUT window.\n\n    (Nov 8, '01)\n    x  Added code to prevent CPU spiking when no idle function is \n       registered.  Otherwise, if an idle function is registered, spike\n       CPU so that the idle function gets all the attention it needs and\n       if this is a problem on the program side, the user can stick a \n       sleep() in their idle function.  I believe that this strikes the\n       best balance between GLUT being fast, and also being \"nice\" to \n       other processes.  
Thanks to James Wright for reporting this bug.\n\n    (Nov 8, '01)\n    x  Fixed bug in motion callback handler which wasn't setting the\n       current window, so multiple window apps (e.g., any GLUI app)\n       wouldn't get the callback correctly.\n\n    (Oct 4, '01)\n    x  Fixed bug in glutEnterGameMode() that caused new windows to not \n       be in \"fullscreen\" mode, so they got window decorations.\n\n    (Oct 3, '01)\n    x  Fixed bug in getVisualInfoFromString(): visuals not reloaded on \n       display mode change.  Reload visuals each time they are queried.\n       This fixes a problem with Win32 because the list of available Visuals\n       (Pixelformats) changes after a change in display mode. The problem \n       occurs when switching to game mode and back.  Thanks to Michael \n       Wimmer for pointing this out & providing the fix.\n\n    (Oct 3, '01)\n    x  Fixed bug in XGetVisualInfo(): pixelformats enumerated incorrectly.\n       Passing 0 as a pixelformat index to DescribePixelFormat gives\n       unpredictable results (e.g., this fails on the Voodoo opengl32.dll \n       and always reports 0 as the last available pixelformat index).\n       Thanks to Michael Wimmer for pointing this out & providing the fix.\n\n    (Oct 3, '01)\n    x  Fixed bug in glXGetConfig(): pixelformats enumerated incorrectly.  The\n       test was OpenGL support OR draw to window, but should be AND.  Thanks\n       to Michael Wimmer for pointing this out & providing the fix.\n\n    (Sep 28, '01)\n    x  Fixed glutChangeToSubMenu()/glutChangeToMenuEntry() bug where if you \n       went back and forth between a submenu and a plain entry, the submenu \n       wouldn't be updated properly.\n\n    (Sep 28, '01)\n    x  glutSetIconTitle() is now a nop.\n\n    (Sep 28, '01)\n    x  glutFullScreen() now sets the window as TOPMOST, therefore, the \n    window will always be on top (this essentially disables alt-tabbing).\n\n    (Sep 28, '01)\n    x  The key repeat ignore flag is now honored correctly.\n\n    (Sep 28, '01)\n    x  Key presses are now reported more accurately and fully, in \n       particular, modified up events (i.e., SHIFT-2) are now reported \n       correctly.\n\n    (Sep 28, '01)\n    x  Subwindows nested arbitrarily deep get their keyboard callbacks \n       correctly now.\n\n    (Sep 28, '01)\n    x  Major rewrite of the window procedure code to clean it up and make\n       way for other bug fixes.\n\n    (Sep 23, '01)\n    x  Fixed noof example program to use RAND_MAX instead of assumed\n       max of 2147483647.0.  (Now it looks _much_ better!)\n\n    (Sep 22, '01)\n    x  Fixed sunlight example program.  globe.raw data file was corrupt,\n       added a new one.\n\n    (Sep 22, '01)\n    x  Fixed zcomposite example program to print message if overlay\n       support is not found (instead of crashing).\n\n    (Jan 22, '01)\n    x  Fixed malloc(0) bug in Win32 version of XGetVisualInfo.  Thanks \n       to Kekoa Proudfoot for bringing this to my attention.\n\n    (Dec 12, '00)\n    x  Added data files for the advanced & advanced97 programs.\n\n    (Dec 12, '00)\n    x  Added Developer Studio 6 project and workspace files for pretty \n       much everything (the stuff left out was usually unix specific).\n\n    (Dec 7, '00)\n    x  Fixed several compilation problems & corrupt files.  
Thanks to \n       Alexander Stohr for bringing these to my attention and providing \n       detailed fixes.\n\n    (Dec 6, '00)\n    x  Fixed compiler support for lcc.  Thanks to Gordon for bringing \n       this to my attention and for debugging fixes.\n\n    (Nov 8, '00)\n    x  Fixed submenu problem (sometimes the menu callback was not \n       called for valid items).  Thanks to Michael Keeley.\n\n    (Oct 16, '00)\n    x  Corrected corrupt duck.iv file.  Thanks to Jon Willeke for finding \n       this problem.\n\n    (Sept 27, '00)\n    x  Fixed bug in processWorkList that could cause a hang.  Thanks to\n       Bill Volz & Daniel Azuma.\n\n    (Sept 26, '00)\n    x  Added mui DLL project file (thanks to DMWeldy@ugsolutions.com).\n\n    (Sept  9, '00)\n    x  Fixed Delete key bug (crash when no keyboard callback was\n       registered, but a special key callback was).  Thanks to\n       Kent Bowling (kent_bowling@hotmail.com) for finding this bug.\n\n    (May 18, '00)\n    x  Fixed subwindow keyboard callbacks.\n\n    (May 22, '97)\n    o  Menus don't work under Windows 95\n    x  Fixed!  Added a unique identifier to each menu item, and a \n       search function to grab a menu item given the unique identifier.\n\n    (May 21, '97)\n    o  A few minor bug fixes here and there.\n    x  Thanks to Bruce Silberman and Chris Vale for their help with\n       this.  We now have a DLL!\n\n    (Apr 25, '97)\n    o  DLL version of the library is coming (as soon as I figure out\n       how to do it -- if you know, let me know).\n    x  Thanks to Bruce Silberman and Chris Vale for their help with\n       this.  We now have a DLL!\n\n    (Apr 24, '97)\n    x  Added returns to KEY_DOWN etc messages so that the F10 key\n       doesn't toggle the system menu anymore.\n\n    (Apr 7, '97)\n    o  Palette is incorrect for modes other than TrueColor.\n    x  Fixed this by forcing a default palette in modes that aren't\n       TrueColor in order to 'simulate' it.  The application\n       program shouldn't have to do this IMHO, but I guess we\n       can't argue with Microsoft (well, we can, but what good\n       will it do?).\n\n    (Apr 2, '97)\n    x  Added glut.ide file for Borland users.\n\n    (Apr 2, '97)\n    x  Fixed a bug in the WM_QUERYNEWPALETTE message.  Wasn't\n       checking for a null colormap, then de-ref'd it.  Oops.\n\n    (Mar 13, '97)\n    o  glutTimerFunc: \n       Currently, GLUT for Win32 programs busy wait when there is\n       an outstanding timer event (i.e., there is no select()\n       call).  I haven't found this to be a problem, but I plan to\n       fix it just because I can't bear the thought of a busy wait.\n    x  Added a timer event and a wait in the main loop.  This fixes\n       the CPU spike.\n\n    (Mar 11, '97)\n    x  Fixed subwindow visibility.  The visibility callback of\n       subwindows wasn't being called, now it is.\n\n    (Mar 11, '97)\n    o  glutGetHDC, glutGetHWND:\n       In order to support additional dialog boxes, wgl fonts, and\n       a host of other Win32 dependent structures, two functions\n       have been added that operate on the current window in GLUT.\n       The first (glutGetHDC) returns a handle to the current\n       window's device context.  The second (glutGetHWND) returns\n       a handle to the current window.\n    x  Took these out to preserve GLUT portability.\n\n    (Mar 11, '97)\n    x  Fixed the glutWarpPointer() coordinates.  
Were relative to\n       the screen, now relative to window (client area) origin\n       (which is what they're supposed to be).\n\n    (Mar 11, '97)\n    o  glutCreateMenu, glutIdleFunc:\n       Menus are modal in Win32.  That is, they don't allow any\n       messages to be processed while they are up.  Therefore, if\n       an idle function exists, it will not be called while\n       processing a menu.\n    x  Fixed!  I've put in a timer function that fires every\n       millisecond while a menu is up.  The timer function handles\n       idle and timer events only (which should be the only\n       functions that are firing when a menu is up anyway).\n\n    (Mar 7, '97)\n    x  Fixed minor bugs tracked down by the example programs.\n\n    (Mar 6, '97)\n    x  Merged 3.3 GLUT for X11 into 3.2 GLUT for Win32.  New code\n       structure allows for EASY merging!\n\n    o  In Win32, the parent gets the right to set the cursor of\n       any of its children.  Therefore, a child window's cursor\n       will 'blink' between its cursor and its parent's.\n    x  Fixed this by checking whether the cursor is in a child\n       window or not.\n\n    (Feb 28, '97)\n    o  On initial bringup apps are getting 2 display callbacks.\n    x  Fixed by the Feb 28 re-write.\n\n    o  Some multiple window (not subwindow) functionality is messed up.\n       See the sphere.exe program.\n    x  Fixed by the Feb 28 re-write.\n\n    o  GLUT for Win32 supports color index mode ONLY in a paletted\n       display mode (i.e., 256 or 16 color mode).\n    x  Fixed this in the re-write.  If you can get a color index\n       visual (pixel format) you can use color index mode.\n\n    (Feb 28, '97)\n    o  Quite a few bugs (and incompatibilities) were being caused\n       by the structure that I used in the previous port of GLUT.\n       Therefore I decided that it would be best to \"get back to\n       the roots\".  I re-implemented most of glut trying to stick\n       with the structure laid out by Mark.  The result is a much\n       more stable version that passes ALL (!) (except overlay)\n       the tests provided by Mark.  In addition, this new\n       structure will allow future enhancements by Mark to be\n       integrated much more quickly into the Win32 version.  Also,\n       I'm now ordering the bugs in reverse, so that the most\n       recently fixed appear at the top of the list.\n\n    (9/8/96)\n    o  Changed the glutGetModifiers code to produce an error if not\n       called in the core input callbacks.\n\n    (9/11/96)\n    o  If the alt key is pressed with more than one other modifier key\n       it acts as if it is stuck -- it stays selected until pressed\n       and released again.\n    x  Fixed. \n\n    (9/12/96)\n    o  When a submenu is attached to a menu, sometimes a GPF occurs.\n    x  Fixed.  Needed to set the submenu before referencing its members.\n\n    o  Kenny: Also, one little problem, I attached the menu to the \n       right-button, but when the left-button is pressed I detach\n       it to give the right-button new meaning; if I pop-up the menu and I\n       don't want to select anything, like most users, I click off of the\n       menu to make it disappear. When I do this, I get a GLUT error and \n       the program terminates because I am altering the menu attachment \n       from within the button press while the menu is active. \n    x  Fixed.  
Needed to finish the menu when the user presses the button,\n       not just when a button is released.\n\n    o  GLUT for Win32 emulates a middle mouse button by checking if\n       both mouse buttons are down.  This causes a lot of problems with \n       the menu and other multiple mouse button things.  \n    x  Fixed.  No more middle mouse button emulation.  Perhaps it would\n       be a good idea to emulate the middle mouse button (if not present)\n       with a key?\n\n    (9/15/96)\n    o  Added code to accept a user defined icon.  If no icon is provided,\n       a default icon is loaded.\n\n    (9/19/96)\n    o  Shane: Command line options seem to be screwed up. (9/13)\n    x  Fixed.  The geometry command line was broken, and so was the\n       gldebug command line.\n\n    o  Fixed a bug in the default glut reshape.  It was looking for the\n       parent of the current window and GPF'ing if there wasn't a parent.\n       Put in a check for a parent, and if none is there, use the\n       child.\n\n    o  Idle function sucks up all processor cycles. (9/8/96)\n    x  I don't know if this is avoidable.  If you have a tight rendering\n       loop, it may be that the processor time is going to be sucked up\n       no matter what.  You can add a sleep() to the end of your render\n       loop if you would like to yield to other processes and you don't\n       care too much about the speed of your rendering loop.  If you have\n       hardware that supports OpenGL (like a 3Dpro card, or GLint card) \n       then this should be less of a problem, since it won't be rendering\n       in software. (9/11/96)\n\n    o  If a window is fully obscured by another window, the visibility\n       callback is NOT called.  As far as I can tell, this is a limitation\n       of the Win32 API, but a workaround is being searched for. (9/8/96)\n    x  Limitation of the Win32 API.\n\n    o  Fixed the entry functions.  They only work if the keyboard focus\n       changes.  Therefore, in most Win32 systems, the mouse must be\n       pressed outside of the window to get a GLUT_LEFT message and\n       then pressed inside the window for a GLUT_ENTERED message.\n\n    o  Alt modifier key doesn't work with keyboard callback. (9/8/96)\n    x  Probably okay, because the glut spec says that these keys can\n       be intercepted by the system (which the alt key is...) (9/11/96)\n\n    (11/17/96)\n    o  glutRemoveMenuItem() not working properly.\n    x  Thanks to Gary (grc@maple.civeng.rutgers.edu) for the fix to\n       this one.\n\n    o  Timer functions are messed up.\n    x  Thanks to Joseph Galbraith for the fix to this one.\n\n    (12/9/96)\n    o  One (minor) difference came up between the X version of glut\n       and the NT one which you should know about. It is not a new\n       problem, and it concerns co-ords returned to the pointer\n       callbacks. (glutMotionFunc, glutMouseFunc)\n       Under X, you get co-ords in the range 0 +/- 2^15, under NT\n       you get 0..2^16. 
This is only really a problem when moving\n       above or to the left of the window.\n       eg dragging one pixel ABOVE the window will give :-\n       under x11 :      y = -1\n       under nt  :      y = 2^16 -1\n    x  Put in fix provided by Shane Clauson.\n\n    (12/17/96)\n    o  Idle functions not working properly for multiple windows.\n    x  Fixed this by posting an idle message to every window in the \n       window list when idle.\n\n    (12/18/96)\n    o  glutSetCursor() was misbehaving (lthomas@cco.caltech.edu).\n    x  Win32 requires that the hCursor member of the window class\n       be set to NULL when the class is registered; otherwise,\n       whenever the mouse is moved, the original cursor is\n       replaced (go figure!).  Now sets the cursor whenever a\n       WM_MOUSEMOVE message is received, because the WM_SETCURSOR\n       event resets the cursor even when in the decoration area.\n\n    o  Geometry is not being handled quite right.  The numbers don't\n       take into account the window decorations.  That is, if I say\n       make a window 100x100, then the WHOLE window (not just the\n       client area) is 100x100.  Therefore, the client (opengl) area\n       is smaller than 100x100. (9/8/96)\n    x  Fixed.  Added code to subtract the decoration size on glutGet()\n       and add the decoration size on glutReshapeWindow().\n\n    o  Multiple glutPostRedisplay() calls are NOT being combined.\n       To get round the \"coalesce\" problem on glutPostRedisplay,\n       the easiest solution is to roll-your-own coalesce by\n       keeping a global \"dirty\" flag in the app (eg replace all\n       calls to glutPostRedisplay with image_dirty=TRUE;), and to\n       handle image_dirty with a single glutPostRedisplay in the\n       idle callback when required.  (erk - but increases\n       performance for my particular app (a rendering engine on\n       the end of a pipeline with a stream of graphics updates) by\n       a couple of orders of magnitude ! ) (9/8/96)\n    x  Added code to coalesce redisplays.  Every idle cycle, a\n       check is made to see which windows need redisplay; if they\n       need it, a redisplay is posted.  The glutPostRedisplay()\n       call is just a stub that sets a flag.\n\n\nTHANKS:\n\n    Special thanks to the following people for extensive testing, \n    suggestions, fixes and help:\n\n    Alexander Stohr\n    Shane Clauson\n    Kenny Hoff\n    Richard Readings\n    Paul McQuesten\n    Philip Winston\n    JaeWoo Ahn\n    Joseph Galbraith\n    Paula Higgins\n    Sam Fortin\n    Chris Vale\n    Bill Mitchell\n\n    and of course, the original author of GLUT:\n    Mark Kilgard.\n\n    and many others...\n\n\nCOPYRIGHT:\n\nThe OpenGL Utility Toolkit distribution for Win32 (Windows NT &\nWindows 95) contains source code modified from the original source\ncode for GLUT version 3.3 which was developed by Mark J. Kilgard.  The\noriginal source code for GLUT is Copyright 1997 by Mark J. Kilgard.\nGLUT for Win32 is Copyright 1997 by Nate Robins and is not in the\npublic domain, but it is freely distributable without licensing fees.\nIt is provided without guarantee or warrantee expressed or implied.\nIt was ported with the permission of Mark J. Kilgard by Nate Robins.\n\nTHIS SOURCE CODE IS PROVIDED \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER\nEXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\nOR MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.\n\nOpenGL (R) is a registered trademark of Silicon Graphics, Inc.\n"
  },
  {
    "path": "licenses/opencv.txt",
    "content": "By downloading, copying, installing or using the software you agree to this license.\nIf you do not agree to this license, do not download, install,\ncopy or use the software.\n\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2000-2015, Intel Corporation, all rights reserved.\nCopyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\nCopyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.\nCopyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.\nCopyright (C) 2015, OpenCV Foundation, all rights reserved.\nCopyright (C) 2015, Itseez Inc., all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are disclaimed.\nIn no event shall copyright holders or contributors be liable for any direct,\nindirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n"
  },
  {
    "path": "licenses/parrot.txt",
    "content": "IMPORTANT: INSTRUCTION TO USE AND TO APPLY THE TERMS OF THE LICENSE TO ANY NEW PROGRAM\n\nREAD BEFORE DOWNLOADING, COPYING, INSTALLING OR USING\n   \n\nCopyright (C) 2007-2011, PARROT SA, all rights reserved.\n\n\nYou may use, copy, modify the PARROT AR.Drone SDK and APIs or any portion of it, and thus form a work based on Parrot SDK and APIs, and copy and redistribute in source code and binary forms, with or without modification, provided that you comply with following conditions:\n\n\tRedistribution in source code, with or without modification, must retain Parrot copyright notice, the following disclaimer and the license to develop and use in a text file named Parrot License.\n\n\tRedistribution in binary form must reproduce Parrot copyright notice, the following disclaimer in the product documentation or legal notice.\n\n\tThe name of Parrot may not be used to endorse or promote products derived from the APIs without specific prior written permission.\n\n\nDISCLAIMER\nThe APIs is provided by PARROT and contributors \"AS IS\" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall PARROT and contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.  \n\n\nDEVELOPMENT LICENSE OF THE PARROT AR.DRONE SDK and APIs V2.0\n(Creation of games for the Parrot AR.DRONE)\n\n\n\n\nArticle 1: Purpose \nThe purpose of the present Development License is to define the terms and conditions under which the Developer is authorized to use the source codes of the PARROT SDK and APIs to create under its own responsibility a Game for the AR.Drone and to market it for free or against payment.\n\nArticle 2: Definitions\nThe terms defined hereunder, used in singular or plural, shall have the following meaning:\n\n- PARROT SDK and APIs : means the AR.Drone software development kit and AR.Drone APIs and  associated documentation, allowing to pilot the PARROT AR.Drone, from a mobile phone, a console game, a computer or any other electronic device, and which source codes are provided for free to the Developer;\n-  Developer  means a physical person, of age, having the capacity to accept the terms of the present License or a person, representing a company and having all powers to that effect to bind the company; \n- PARROT AR.Drone or PARROT AR.Drone or  PARROT Drone means the augmented reality drone developed by PARROT, without pilot, remote-controlled by a mobile phone, a console game, a computer or any other electronic device;\n-  Interoperability : means the ability of an application to exchange information with the Parrot AR.Drone or any its accessories;\n-  Game for AR.Drone : means a software or a video game, created by the Developer from the PARROT SDK and APIs  or any modified version, and which functionalities shall exclusively be dedicated to the use of the PARROT AR.Drone  for entertaining, game, leisure or training purpose or any other purpose compatible with the terms of the present license;\n-  License  means the present license of development and use of 
the APIs; \n-  User Account means the information relating to the identification of a Developer, such as first name, surname, email address, login, password, company, title;\n-  User : means any physical person using the PARROT Drone or a Game for AR.Drone.\n\nARTICLE 3  Identification\n3.1 The downloading of the APIs is subject to the Identification of the Developer by filling in the online registration form and accept the terms of the License by clicking the acceptance box on https://projects.ardrone.org .  \n3.2 The Developer commits to provide accurate information and to update the information, if necessary. \n\n3.3 The User Account is personal and confidential; it cannot be assigned to a third party.\n\n3.4 The Developer commits to inform PARROT immediately of any disclosure, non authorized use by a third party of the login and/or password of its User Account. In such case, PARROT shall be entitled to invalid the login and the password.  \n\n\nARTICLE 4   Conditions of Use\nNotwithstanding the limitation and restrictions mentioned in article 5, PARROT grants to the Developer, who accepts, a personal, non-assignable, non-exclusive, worldwide, free license of development of the PARROT SDK and APIs authorizing the Developer to: \n\treproduce, without number limitation, the APIs on any device under its responsibility, and necessary to create the Game for AR.Drone;\n\ttranslate, adapt, arrange, modify the APIs  in order to create a Game for AR.Drone in the software and hardware environment chosen by the Developer;\n\tto market, for free or against fees, copies of the Game for AR.Drone created ;\n\tto grant licenses of the Game for AR.Drone to the Users of the PARROT Drone.\n\n\nARTICLE 5  Restrictions to the license\n5.1 The License of development and use of the APIs is subject to the acceptance and to the respect by the Developer without any reserves of all restrictions and limitations listed hereafter:  \n\nConsequently, PARROT expressly forbids the Developer:\n(i) \tTo access or use of the PARROT SDK and APIs from a technology or means others than those provided with the APIs;\n(ii) \tTo market copies of the APIs, for free or against fees, and to distribute, sub-license, rent, sell, transfer, commercialize, publish or generally put the APIs to a third party disposal; \n(iii) \tTo do reverse engineering, decompile or attempt to extract the Source Codes of the PARROT Drone; under special legal conditions, necessary information for interoperability purpose might be requested from PARROT ;\n(iv) \tTo destroy, or alter any warning and copyrights notices;\n(v) \tTo use the PARROT SDK and APIs to develop an application other than a Game for AR.Drone. The Game for AR.Drone, shall have for sole purpose to be used by a User for entertaining, game, leisure or training. The creation of applications for the use of the PARROT Drone  for professional use or use such as but not limited to military, and, without limitation, security, watching, spying, defence, cartography, is strictly forbidden. \n(vi) \tTo use the PARROT SDK and APIs or to create a Game for AR.Drone breaching the terms of: \n(1)  The License;\n(2)  Third party rights; \n(3)  Applicable laws and regulations; \n(4)  Any instruction provided by PARROT. 
\n\n5.2 Therefore, and without limitation, the Developer commits when using the APIs or when creating a Game for AR.Drone:\na) not to infringe any applicable laws and regulation which the Developer shall determine whatever the country where he intends to develop and/or market the Game for AR.Drone ; \nb) not to reproduce, represent, put contents which infringe copyrights, patents, trademarks, design, model, know-how, commercial secret and any intellectual property rights belonging to PARROT or to third parties ;\nc) not to falsify or remove copyrights, trademarks notices of any other proprietary rights of PARROT figuring in the Application;\nd) not to display a Game for AR.Drone which falsely or implied would suggest an endorsement or any approbation from PARROT ;\ne) not to collect  or treat, or store, with the Game for AR.Drone, personal data from third, especially User of the Game for AR.Drone without having previously asked for their consent. Shall the Game for AR.Drone store personal data, it should be in compliance with the applicable law \n\n5.3 PARROT is sole judge of the compliance of the Game for AR.Drone with the terms and conditions of the present License.\n\n5.4 PARROT reserves the right to correct or modify the PARROT SDK and APIs during the License duration. \n\nARTICLE 6  Duration of the License\n\nThe License of the PARROT SDK and APIs is granted for the duration of the intellectual property rights of the Game for AR.Drone. It enters into force upon acceptance by the Developers by clicking the acceptance box or using a modified release of the PARROT SDK and APIs.\n\n\nARTICLE 7  Termination of the License\n\n7.1 PARROT reserves the right to terminate the present License, without notice, in following cases:  \n\ni.\tThe Developer  has created a Game for AR.Drone in violation of the present License terms, any applicable law and regulation or PARROT has objective reasons to believe that the Game for AR.Drone is infringing the License or any applicable law and regulation ;\nii.\tThe Developer has created a Game for AR.Drone in violation of PARROTs intellectual property rights  or PARROT has objective reasons to believe that the Game for AR.Drone is infringing its rights ; \niii.\tThe Developer  has created a Game for AR.Drone in violation of a third partys intellectual property rights  or PARROT has objective reasons to believe that the Game for AR.Drone is infringing a third partys rights ; \niv.\tThe Developer has created a Game for AR.Drone containing bugs, viruses, worms, defects, Trojan horses, or any items of a destructive nature or PARROT has objective reasons to believe that the Game for AR.Drone contains of this item;\n\n7.2 Termination of the License shall be notified by email to any user breaching the terms of the License.\n\n7.3 In case of closing of a User Account or termination of the License, for whatever reason, articles which by their nature shall survive shall continue to be applicable, in particular articles 11.RIGHTS OF PARROT; 12.DISCLAIMER; 13.LIMITATION OF RESPONSIBILITY; 14.INDEMNITY; 17.GENERALS PROVISIONS. \n\nARTICLE 8  Specific Development\n\nUpon request of a video game editor, PARROT may perform specific development of the PARROT SDK and APIs, in order to enable the creation of a Game for AR.Drone for commercial purpose by such editor. The development services provide by PARROT shall be subject to a separate agreement between PARROT and the editor. 
PARROT and the editor shall share the revenues gained from the sale of games created thanks to the specific development for an amount to be determined by agreement between the parties. \n\n\nARTICLE 9  Upgrade of the APIs \n9.1 Due to technological innovations and for quality and/ or security reasons, the Developer acknowledges and agrees that PARROT may at any time modify the APIs, namely by adjunction, removal, improvement of functionalities, or that PARROT may temporarily or definitely suspend the access to the APIs, at its sole discretion and without notice. PARROT warrants  as far as possible and with no result obligation, the ascendant compatibility of the APIs.  \n\n9.2 PARROT shall notify any modification by publication on https://projects.ardrone.org, or per email, or by any other appropriate mean in PARROT judgment. From the notification, the use of the APIs by the Developer to create new Game for AR.Drone shall be deemed as the acceptance by the Developer of the modified License of the PARROT SDK and APIs.\n\n\n \n\nARTICLE 10  License granted to Parrot by the Developper\n10.1 The Developer is owner of all copyrights and other intellectual property rights on the Game for AR.Drone that he creates. If the Developer is posting his Game for AR.Drone on https://projects.ardrone.org , he grants PARROT a perpetual, irrevocable, worldwide, free and non-exclusive license to reproduce, represent, adapt, arrange , modify, translate, publish, operate and display the Game for AR.Drone by any means of communication, numerical, analogical, electronic..to the public and namely by any network(internet, intranet), wireless or not, by mobile phone, email, by satellite, par optical fibre, par television and on any media.\n\n10.2 This license shall be granted in order to allow PARROT to display, promote, and distribute the PARROT SDK and APIs and/or the PARROT AR.Drone. \n\n10.3 This license includes the right for PARROT to make the Game for AR.Drone available  totally or partially  to any Users of the AR.DRONE PARROT or to any person with who PARROT is in relationship, and to use the Game for AR.Drone for information or advertisement purpose.\n\n10.4 The Developer agrees that PARROT for technical or for improvement purpose, may (a) transmit or communicate the Game for AR.Drone on public network others than internet (wireless or not, namely mobile telephony) and various media (graphic, magnetic, optical, numerical, analogical); and (b) make any modification necessary to adapt and make the Game for AR.Drone compliant to technical specifications so to make it interoperable with networks or devices.  \n\n10.5 The Developer warrants PARROT that he has the rights to grant the license. \n\n\nARTICLE 11: Parrot Rights\n11.1 Intellectual and industrial property rights. \nPARROT is and remains the owner of all rights and interests on the PARROT SDK and APIs and on the PARROT AR.Drone, including without limitation all rights of intellectual and industrial property (copyrights, database rights, patents, trademark, design and model, semi-conductor topography) and/or any rights on the know-how, schemes, plans, algorythme,  technologies, ideas, concepts.\n\nIt is expressly specified that PARROT is owner of patents on the PARROT AR.Drone and that a right to use such patent is granted within the frame of the present License. No other rights on the patents are granted to the Developer who commits no to use the technologies issued from those patents for purpose not in the scope of the present License.  
\n\n11.2 Trademarks and logos.\n11.2.1 PARROT is owner of the intellectual property rights on its commercial trade name, trademarks, logos, domain names and any others brand features. PARROT grants the Developer a non-exclusive, non assignable, non transferable, non sub-licensable, free license to use PARROT trademarks and logos for the sole purpose of mentioning that he uses the PARROT SDK and APIs.\n\n11.2.2 When using PARROTs trademarks and logos, the Developer undertakes: \ni.\tNot to display a trademark or a logo in any manner that implies a relationship or affiliation with, sponsorship, or endorsement by PARROT or that can be reasonably interpreted to suggest editorial content has been authored by, or represents the views or opinions of PARROT;\nii.\tNot to use PARROT brand features to disparage PARROT or its products; \niii.\tNot to display a trademark or a logo on its website if it contains or displays adult content or promotes illegal activities, gambling, or the sale of tobacco or alcohol to persons under eighteen (18) years of age;\niv.\tNot to display the PARROT trademark and logo as the most prominent element in any part of the Game for AR.Drone created by the Developer or its packaging;\nv.\tNot to display the PARROT logo as the most prominent logo in the Game for AR.Drone ;\nvi.\tNot to display PARROT trademark or logo in a manner that is misleading, defamatory, infringing, libelous, disparaging, obscene or otherwise objectionable to PARROT;\nvii.\tNot to display a PARROT trademark or logo on a site that violates any law or regulation ; \nviii.\tNot to remove, distort or alter any element of a PARROT brand feature (including squeezing, stretching, inverting, discoloring, etc.). \n\n11.3 The Developer undertakes during the term of the License and after its expiration, not to register or attempt to register any trademark, logo, domain name similar to or confusing with PARROT trademark or logo, in any manner (phonetic, intellectual, visual). PARROT reserves the right to sue for counterfeiting and unfair competition, any Developer who would not respect this commitment and use the trademarks and/or domain name PARROT AR.DRONE OU AR.DRONE in breach of the License.\n \n11.4 The Developer undertakes to immediately remedy to any breach notified by PARROT per email or any other mean concerning any infringement to PARROT intellectual property rights.   \n\n11.5 The Developer, company or physical person, is owner, as applicable, on the intellectual property rights on his name, commercial name, trademarks, logos and any other brand features. He expressly grants PARROT a non exclusive, worldwide and free license to mention his name, commercial name, trademarks, logos, as applicable, to mention that he uses the PARROT SDK and APIs and/or that he has created a Game for AR.Drone. \n\n\nARTICLE 12: DISCLAIMER\n12.1 THE PARROT SDK AND APIs IS PROVIDED AS IS . IN PARTICULAR, PARROT, ITS SUBSIDIARIES, LICENSORS AND THEIR SUPPLIERS, DO NOT REPRESENT OR WARRANT THE DEVELOPER THAT: \n1. ITS USE OF THE APIs WILL MEET ITS REQUIREMENTS; \n2. ITS USE OF THE APIs WILL BE UNINTERRUPTED, TIMELY, SECURE OR FREE FROM ERROR OR WILL OFFER CONSTANT PERFORMANCE;\n3. THAT DEFECTS OR ERRORS WILL BE CORRECTED OR THAT THE APIs WILL BE UPGRADE, PARROT HAVING NO OBLIGATION TO PROVIDE CURATIVE OR EVOLUTIVE SUPPORT;\n4. THE APIs IS COMPLIANT TO ANY SPECIFICATIONS;\n5.  
ANY UPGRADE OF THE PARROT SDK AND APIs WILL BE COMPATIBLE WITH PREVIOUS RELEASE.\n \n12.2 PARROT, ITS SUBSIDIARIES, LICENSORS AND THEIR SUPPLIERS, DO NOT REPRESENT OR WARRANT THE DEVELOPER THAT ITS USER ACCOUNT WILL BE AVAILABLE WITHOUT INTERRUPTION. \n\n12.3 NO ADVICE OR INFORMATION, WHETHER ORAL OR WRITTEN, OBTAINED BY THE DEVELOPER FROM PARROT, ITS SUBSIDIARERIES, OR THROUGH THE APIs, SHALL BE CONSTRUED A WARRANTY PROVIDED BY PARROT.\n\n12.4 PARROT, ITS SUBSIDIARIES, ITS LICENSORS DO NOT WARRANT, ANY RESULT, EXPRESS OR IMPLIED, OF ANY NATURE (TECHNICAL, COMMERCIAL, FINANCIAL OR OTHER) FROM THE USE OF THE APPLICATION.\n\n12.5 PARROT DECLARES THAT TO THE BEST OF ITS KNOWLEDGE THE PARROT SDK AND APIs DOES NOT INFRINGE ANY THIRD PARTY INTELLECTUAL PROPERTY RIGHTS.\n\n\nARTICLE 13: LIMITATION OF RESPONSABILITY\n13.1 THE PROVISION EXCLUSING OR LIMITING PARROTS LIABILITY SHALL ONLY BE APPLICABLE IN COUNTRIES WHERE SUCH PROVISIONS ARE LEGAL. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF CERTAIN WARRANTIES OR THE LIMITATION OR EXCLUSION OF LIABILITY FOR CERTAIN TYPES OF LOSS OR DAMAGES. ACCORDINGLY, PARROTS LIABILITY WILL BE LIMITED TO THE MAXIMUM EXTENT PERMITTED BY LAW.\n\n13.2 THE DEVELOPER EXPRESSLY ACKNOWLEDGES AND AGREES THAT THE USE HE DOES OF ITS USER ACCOUNT, THE APIs, THE GAME FOR AR.DRONE THAT HE CREATES, ARE AT ITS SOLE RISK AND RESPONSABILITY. IN PARTICULAR, THE DEVELOPER SHALL BE SOLELY RESPONSIBLE FOR ANY DAMAGE TO ITS COMPUTER SYSTEM OR OTHER DEVICE, LOSS OF DATA, OR ANY OTHER DAMAGE OR INJURY THAT RESULTS FROM THE DOWNLOAD OR USE OF THE APIs.\n\n13.3 THE DEVELOPER EXPRESSLY ACKNOWLEDGES AND AGREES THAT HE SHALL BE SOLELY RESPONSIBLE FOR ALL COSTS, EXPENSES  INCURRING FOR THE USE OF ITEMS MENTIONED ABOVE AS WELL AS ANY DEVELOPMENT AND PRODUCTION COSTS ASSOCIATED TO THE GAME OF AR.DRONE THAT HE IS CREATING. \n\n13.4 PARROT, ITS SUPPLIERS, LICENSORS, AFFILIATES, ARE NOT RESPONSIBLE FOR ANY DIRECTS OR INDIRECTS, MATERIALS OR IMMATERIALS, CONSECUTIVES OR NON CONSECUTIVES DAMAGES, INCLUDING, BUT NOT BE LIMITED TO, ANY LOSS OF PROFIT (WHETHER INCURRED DIRECTLY OR INDIRECTLY), ANY LOSS OF GOODWILL OR BUSINESS REPUTATION, ANY LOSS OF DATA, COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR OTHER INTANGIBLE LOSS, THAT DEVELOPER OR A THIRD PARTY MAY INCURR FROM:\n\n1- THE USE BY THE DEVELOPER OF ITS USER ACCOUNT, THE PARROT SDK AND APIs, THE GAME FOR AR.DRONE THAT HE CREATES;\n2- THE MARKETING OF THE GAME FOR AR.DRONE FOR FREE OR AGAINST FEES;\n3- THE MODIFICATION OF THE APIs BY PARROT;\n4- THE CLOSING OF THE USER ACCOUNT, THE MODIFICATION OR THE EXPIRATION OR TERMINATION OF THE LICENSE;\n5-   ANY NON ACCURATE OR NON UPDATED INFORMATION PROVIDED BY PARROT, \n\n\n\nARTICLE 14: INDEMNITY\nThe Developer warrants and hereby agrees to indemnify, defend and hold PARROT harmless from and against any claim or liability arising out of: (a) the use of the APIs in breach of the License and/or any instruction provided by PARROT; (b) the Game for AR.Drone; (c) any use by Users of the Game for AR.Drone; (d) any claim that the Game for AR.Drone breaches laws or infringes third party rights;  consequently, assume all costs and damages to which PARROT could be condemned by a jurisdiction on such a basis. The Developer shall cooperate as fully as reasonably required in the defence of any claim and PARROT reserves the right, at its own expense, to assume the exclusive defence and control of any matter subject to indemnification by the Developer. 
And if necessary, to seek equitable relief, including but not limited to preliminary injunction and injunction, in addition to all other remedies.\n\nARTICLE 15: Hyperlinks\n15.1. The https://project.ardrone.org ardrone.org website may include hyperlinks to other web sites or content or resources provided by third parties and companies. PARROT has no control or authority over any web sites, resources, third parties. The Developer acknowledges and agrees that PARROT is not responsible for the availability of any such external sites or resources, and does not endorse any advertising, products or other materials on or available from such web sites or resources.\n\n15.2. The Developer acknowledges and agrees that PARROT is not liable for any loss or damage that may be incurred by the Developer as a result of the availability of those external sites or resources, or as a result of any reliance placed by you on the completeness, accuracy or existence of any advertising, products, or other materials on, or available from, such web sites or resources.\n\n\nARTICLE 16: Language and interpretation\n16.1 The English version of the License shall prevail over any translation, which might only be provided for convenience purpose. Therefore any translation might be provided only for convenience purpose.\n\n16.2 If there is any contradiction between the English language version of the License and a translation of the License, the English language version will take precedence. \n\n16.3 Titles are provided for convenience purpose only; the content of an article shall have precedence on the title. \n\nARTICLE 17: General provisions\n17.1 The License constitutes the entire legal agreement between Parrot and the Developer and completely replace and supersede any prior agreements between PARROT and the Developer.\n \n17.2 The waiver by PARROT to prevail itself from a provision of the License shall not be construed as a waiver to prevail  itself of any right obligation under the License in the future.  \n\n17.3 If any court of law having jurisdiction rules that any provision of this License is invalid, then that provision will be removed from the License without affecting the rest of the License. The remaining provisions of the License will continue to be valid and enforceable.\n\n17.4 Any notice sent by PARROT to the Developer or exchange between the Parties will be validly delivered per email at the address provided by the Developer in its User Account and at legal@ardrone.org for PARROT. \n\nARTICLE 18: Applicable law and jurisdiction\nThe License is governed by French Law, without regard to its conflict of Laws provisions. Any dispute arising out of its interpretation, execution or termination shall be submitted to the exclusive jurisdiction of the relevant court of Paris, even for urgency proceedings or plurality of defendants. Notwithstanding this, PARROT shall be allowed to apply for injunctive remedies (or an equivalent type of urgent legal relief) in any jurisdiction\n\n\nARTICLE 19 - Privacy\n19.1 All information about our privacy policy are provided on www.parrot.com . This policy explains how PARROT treats any personal data which are disclosed to her and protect your privacy. \n\n19.2 The Developer agrees that PARROT may use its personal data in compliance with its privacy policy.   
\n\n\nARTICLE 20  Acceptance of the license \nBy clicking the acceptance box, downloading or  using the PARROT SDK and APIs or an adapted or modified release of the PARROT SDK and APIs, the Developer or any user accepts without reserve all terms and conditions of the License concluded between him and PARROT SA  registered under N 394 149 496 and located 174 quai de Jemmapes FRANCE -  which he commits to respect.  \n\n\n\nLast update: November 2010 \nDocument revision V2.0 \n"
  },
  {
    "path": "licenses/parrotdisclaimer.txt",
    "content": "Copyright (C) 2007-2011, PARROT SA, all rights reserved.\n\nDISCLAIMER\nThe APIs is provided by PARROT and contributors \"AS IS\" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall PARROT and contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage."
  },
  {
    "path": "licenses/pthreads-w32.txt",
    "content": "\tpthreads-win32 - a POSIX threads library for Microsoft Windows\n\n\nThis file is Copyrighted\n------------------------\n\n    This file is covered under the following Copyright:\n\n\tCopyright (C) 2001 Ross P. Johnson\n\tAll rights reserved.\n\n\tEveryone is permitted to copy and distribute verbatim copies\n\tof this license document, but changing it is not allowed.\n\nPthreads-win32 is covered by the GNU Lesser General Public License\n------------------------------------------------------------------\n\n    Pthreads-win32 is open software; you can redistribute it and/or\n    modify it under the terms of the GNU Lesser General Public License\n    as published by the Free Software Foundation version 2.1 of the\n    License.\n\n    Pthreads-win32 is several binary link libraries, several modules,\n    associated interface definition files and scripts used to control\n    its compilation and installation.\n\n    Pthreads-win32 is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Lesser General Public License for more details.\n\n    A copy of the GNU Lesser General Public License is distributed with\n    pthreads-win32 under the filename:\n\n\t    COPYING.LIB\n\n    You should have received a copy of the version 2.1 GNU Lesser General\n    Public License with pthreads-win32; if not, write to:\n\n\t    Free Software Foundation, Inc.\n\t    59 Temple Place\n\t    Suite 330\n\t    Boston, MA\t02111-1307\n\t    USA\n\n    The contact addresses for pthreads-win32 is as follows:\n\n\tWeb:\thttp://sources.redhat.com/pthreads-win32\n        Email:  Ross Johnson <rpj@ise.canberra.edu.au>, or\n                Ross.Johnson@canberra.edu.au\n\n\n\nPthreads-win32 copyrights and exception files\n---------------------------------------------\n\n    With the exception of the files listed below, Pthreads-win32\n    is covered under the following GNU Lesser General Public License\n    Copyrights:\n\n\tPthreads-win32 - POSIX Threads Library for Win32\n\tCopyright(C) 1998 John E. Bossom\n\tCopyright(C) 1999,2002 Pthreads-win32 contributors\n\n\tThe current list of contributors is contained\n        in the file CONTRIBUTORS included with the source\n\tcode distribution. The current list of CONTRIBUTORS\n\tcan also be seen at the following WWW location:\n        http://sources.redhat.com/pthreads-win32/contributors.html\n\n\tContact Email: rpj@ise.canberra.edu.au\n\n    These files are not covered under one of the Copyrights listed above:\n\n            COPYING\n\t    COPYING.LIB\n            tests/rwlock7.c\n\n    This file, COPYING, is distributed under the Copyright found at the\n    top of this file.  It is important to note that you may distribute\n    verbatim copies of this file but you may not modify this file.\n\n    The file COPYING.LIB, which contains a copy of the version 2.1\n    GNU Lesser General Public License, is itself copyrighted by the\n    Free Software Foundation, Inc.  Please note that the Free Software\n    Foundation, Inc. 
does NOT have a copyright over Pthreads-win32,\n    only the COPYING.LIB that is supplied with pthreads-win32.\n\n    The file tests/rwlock7.c is derived from code written by\n    Dave Butenhof for his book 'Programming With POSIX(R) Threads'.\n    The original code was obtained by free download from his website\n    http://home.earthlink.net/~anneart/family/Threads/source.html\n    and did not contain a copyright or author notice. It is assumed to\n    be freely distributable.\n\n    In all cases one may use and distribute these exception files freely.\n    And because one may freely distribute the LGPL covered files, the\n    entire pthreads-win32 source may be freely used and distributed.\n\n\n\nGeneral Copyleft and License info\n---------------------------------\n\n    For general information on Copylefts, see:\n\n\thttp://www.gnu.org/copyleft/\n\n    For information on GNU Lesser General Public Licenses, see:\n\n\thttp://www.gnu.org/copyleft/lesser.html\n\thttp://www.gnu.org/copyleft/lesser.txt\n\n\nWhy pthreads-win32 did not use the GNU General Public License\n-------------------------------------------------------------\n\n    The goal of the pthreads-win32 project has been to\n    provide a quality and complete implementation of the POSIX\n    threads API for Microsoft Windows within the limits imposed\n    by virtue of it being a stand-alone library and not\n    linked directly to other POSIX compliant libraries. For\n    example, some functions and features, such as those based\n    on POSIX signals, are missing.\n\n    Pthreads-win32 is a library, available in several different\n    versions depending on supported compilers, and may be used\n    as a dynamically linked module or a statically linked set of\n    binary modules. It is not an application on it's own.\n\n    It was fully intended that pthreads-win32 be usable with\n    commercial software not covered by either the GPL or the LGPL\n    licenses. Pthreads-win32 has many contributors to it's\n    code base, many of whom have done so because they have\n    used the library in commercial or proprietry software\n    projects.\n\n    Releasing pthreads-win32 under the LGPL ensures that the\n    library can be used widely, while at the same time ensures\n    that bug fixes and improvements to the pthreads-win32 code\n    itself is returned to benefit all current and future users\n    of the library.\n\n    Although pthreads-win32 makes it possible for applications\n    that use POSIX threads to be ported to Win32 platforms, the\n    broader goal of the project is to encourage the use of open\n    standards, and in particular, to make it just a little easier\n    for developers writing Win32 applications to consider\n    widening the potential market for their products.\n"
  },
  {
    "path": "readme.txt",
    "content": "-----------------------------------------------------------------\n CV Drone (= OpenCV + AR.Drone)\n Copyright (C) 2016 puku0x\n https://github.com/puku0x/cvdrone\n-----------------------------------------------------------------\n\nINTRODUCTION\n  CV Drone is free software; you can redistribute it and/or\n  modify it under the terms of EITHER:\n   (1) The GNU Lesser General Public License as published by the Free\n       Software Foundation; either version 2.1 of the License, or (at\n       your option) any later version. The text of the GNU Lesser\n       General Public License is included with this library in the\n       file cvdrone-license-LGPL.txt.\n   (2) The BSD-style license that is included with this library in\n       the file cvdrone-license-BSD.txt.\n\n  This software is distributed in the hope that it will be useful,\n  but WITHOUT ANY WARRANTY; without even the implied warranty of\n  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n  cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n\nHOW TO INSTALL\n  Please unzip \"cvdrone-master.zip\" into an arbitrary directory.\n\nHOW TO UNINSTALL\n  Please delete the cvdrone folder.\n\nBEFORE YOU BUILD\n  You should install Visual Studio before you build CV Drone.\n  CV Drone supports VC++2010/2012/2013/2015.\n  To download VS, please see http://www.microsoft.com/visualstudio/eng/downloads .\n\nHOW TO USE\n  1. Open \\build\\vs20xx\\test.sln\n  2. Press F7 to build.\n  3. Press F5 (or Ctrl+F5) to run.\n  4. You can play around with OpenCV. Sample codes are in \"src\\samples\".\n\nFOR AR.DRONE 1.0 USERS\n  Please update your AR.Drone's firmware to 1.11.5.\n\nFOR AR.DRONE 2.0 USERS\n  Please update your AR.Drone's firmware to 2.4.8.\n\nFOR VS2010 USERS\n  You can not build CV Drone by VS2010 after you installed VS2012.\n  To build VS2010, \n    1) You should install \"Visual Studio 2010 SP1\".  [Recommended]\n    or,\n    2) You should uninstall \".Net Framework 4.5\" and re-install \"4.0\".\n\nLIBRARY DEPENDENCIES\n  CV Drone uses following libraries.\n  - OpenCV 3.1.0 <3-clause BSD license>\n    http://opencv.org/\n  - FFmpeg 2.2.3 <LGPL v2.1 license>\n    http://www.ffmpeg.org/\n  - stdint.h/inttypes.h for Microsoft Visual Studio r26 <BSD license>\n    https://code.google.com/p/msinttypes/\n  - POSIX Threads for Win32 2.9.1 <LGPL v2.1 license>\n    http://www.sourceware.org/pthreads-win32/\n\n  Marker-based AR sample uses following libraries adding to the above.\n  - GLUT for Win32 3.7.6\n    http://user.xmission.com/~nate/glut.html\n  - MarkerDetector\n    https://github.com/MasteringOpenCV/code/tree/master/Chapter2_iPhoneAR/Example_MarkerBasedAR/Example_MarkerBasedAR\n\n  License files for each library can be found in the 'licenses' folder.\n\nThank you."
  },
  {
    "path": "samples/camera.xml",
    "content": "<?xml version=\"1.0\"?>\n<opencv_storage>\n<intrinsic type_id=\"opencv-matrix\">\n  <rows>3</rows>\n  <cols>3</cols>\n  <dt>f</dt>\n  <data>\n    5.81399719e+002 0. 3.17410492e+002 0. 5.78456116e+002\n    1.37808365e+002 0. 0. 1.</data></intrinsic>\n<distortion type_id=\"opencv-matrix\">\n  <rows>1</rows>\n  <cols>4</cols>\n  <dt>f</dt>\n  <data>\n    -5.16806960e-001 2.71592855e-001 4.40666080e-003 -1.29973365e-003</data></distortion>\n</opencv_storage>\n"
  },
  {
    "path": "samples/old/sample_camera_calibration.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// Execute calibration\n#define CALIB_MODE  1                   // ON:1 OFF:0\n\n// Parameter for calibration pattern\n#define PAT_ROW    (7)                  // Rows of pattern\n#define PAT_COL    (10)                 // Columns of pattern\n#define PAT_SIZE   (PAT_ROW*PAT_COL)\n#define CHESS_SIZE (24.0)               // Size of a pattern [mm]\n\n// --------------------------------------------------------------------------\n// cvDrawText(Image, Drowing point, Messages)\n// Description  : Draw the specified text.\n// Return value : NONE\n// --------------------------------------------------------------------------\ninline void cvDrawText(IplImage *image, CvPoint point, const char *fmt, ...)\n{\n    // Font\n    static CvFont font = cvFont(1.0);\n\n    // Apply format\n    char text[256];\n    va_list ap;\n    va_start(ap, fmt);\n    vsprintf(text, fmt, ap);\n    va_end(ap);\n\n    // Draw the text\n    cvPutText(image, text, point, &font, CV_RGB(0, 255, 0));\n}\n\n// --------------------------------------------------------------------------\n// cvAsk(Message)\n// Description  : Show a question.\n// Return value : NO:0 YES:1\n// --------------------------------------------------------------------------\ninline int cvAsk(const char *message, ...)\n{\n    char *arg;\n    char str[256];\n\n    // Apply format\n    va_start(arg, message);\n    vsprintf(str, message, arg);\n    va_end(arg);\n\n    // Show message box\n    #ifndef _WIN32\n    //return (MessageBox(NULL, str, \"QUESTION\", MB_YESNO|MB_ICONQUESTION|MB_TOPMOST|MB_SETFOREGROUND) == IDYES);\n    #else\n    char c = 'n';\n    printf(str);\n    scanf(\"%c\", &c);\n    return (c == 'y' || c == 'Y');\n    #endif\n}\n\n#if CALIB_MODE\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Images\n    std::vector<IplImage*> images;\n    printf(\"Press space key to take a sample picture !\\n\");\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(1);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // Convert the camera image to grayscale\n        IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);\n        cvCvtColor(image, gray, CV_BGR2GRAY);\n\n        // Detect the chessboard\n        int corner_count = 0;\n        CvSize size = cvSize(PAT_COL, PAT_ROW);\n        CvPoint2D32f corners[PAT_SIZE];\n        int found = cvFindChessboardCorners(gray, size, corners, &corner_count, CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE|CV_CALIB_CB_FAST_CHECK);\n\n        // Chessboard detected\n        if (found) {\n            // Draw corners\n            cvDrawChessboardCorners(image, size, corners, corner_count, found);\n\n            // If you push Space key\n            if (key == ' ') {\n                // Add to buffer\n                images.push_back(gray);\n            }\n            else {\n                // Release the image\n                
cvReleaseImage(&gray);\n            }\n        }\n        // Failed to detect\n        else {\n            // Release the image\n            cvReleaseImage(&gray);\n        }\n\n        // Display the image\n        cvDrawText(image, cvPoint(15, 20), \"NUM = %d\", (int)images.size());\n        cvShowImage(\"camera\", image);\n    }\n\n    // Destroy the window\n    cvDestroyWindow(\"camera\");\n\n    // At least one image was taken\n    if (!images.empty()) {\n        // Total number of images\n        const int num = (int)images.size();\n\n        //// For debug\n        //for (int i = 0; i < num; i++) {\n        //    char name[256];\n        //    sprintf(name, \"images[%d/%d]\", i+1, num);\n        //    cvShowImage(name, images[i]);\n        //    cvWaitKey(0);\n        //    cvDestroyWindow(name);\n        //}\n\n        // Ask whether to save the parameters\n        if (cvAsk(\"Do you want to save the camera parameters? (y/n)\\n\")) {\n            // Detect corners\n            int *p_count = (int*)malloc(sizeof(int) * num);\n            CvPoint2D32f *corners = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f) * num * PAT_SIZE);\n            for (int i = 0; i < num; i++) {\n                // Detect chessboard\n                int corner_count = 0;\n                CvSize size = cvSize(PAT_COL, PAT_ROW);\n                int found = cvFindChessboardCorners(images[i], size, &corners[i * PAT_SIZE], &corner_count);\n\n                // Convert the corners to sub-pixel\n                cvFindCornerSubPix(images[i], &corners[i * PAT_SIZE], corner_count, cvSize(3, 3), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));\n                p_count[i] = corner_count;\n            }\n\n            // Set the 3D position of patterns\n            CvPoint3D32f *objects = (CvPoint3D32f*)cvAlloc(sizeof(CvPoint3D32f) * num * PAT_SIZE);\n            for (int i = 0; i < num; i++) {\n                for (int j = 0; j < PAT_ROW; j++) {\n                    for (int k = 0; k < PAT_COL; k++) {\n                        objects[i * PAT_SIZE + j * PAT_COL + k].x = j * CHESS_SIZE;\n                        objects[i * PAT_SIZE + j * PAT_COL + k].y = k * CHESS_SIZE;\n                        objects[i * PAT_SIZE + j * PAT_COL + k].z = 0.0;\n                    }\n                }\n            }\n\n            // Create matrices\n            CvMat object_points, image_points, point_counts;\n            cvInitMatHeader(&object_points, num * PAT_SIZE, 3, CV_32FC1, objects);\n            cvInitMatHeader(&image_points,  num * PAT_SIZE, 1, CV_32FC2, corners);\n            cvInitMatHeader(&point_counts,  num,            1, CV_32SC1, p_count);\n\n            // Estimate intrinsic parameters and distortion coefficients\n            printf(\"Calculating parameters...\");\n            CvMat *intrinsic   = cvCreateMat(3, 3, CV_32FC1);\n            CvMat *distortion  = cvCreateMat(1, 4, CV_32FC1);\n            cvCalibrateCamera2(&object_points, &image_points, &point_counts, cvGetSize(images[0]), intrinsic, distortion);\n            printf(\"Finished !\\n\");\n\n            // Output a file\n            printf(\"Generating an XML file...\");\n            CvFileStorage *fs = cvOpenFileStorage(\"camera.xml\", 0, CV_STORAGE_WRITE);\n            cvWrite(fs, \"intrinsic\", intrinsic);\n            cvWrite(fs, \"distortion\", distortion);\n            cvReleaseFileStorage(&fs);\n            printf(\"Finished !\\n\");\n\n            // Release the matrices\n            free(p_count);\n            cvFree(&corners);\n            
cvFree(&objects);\n            cvReleaseMat(&intrinsic);\n            cvReleaseMat(&distortion);\n        }\n\n        // Release the images\n        for (int i = 0; i < num; i++) cvReleaseImage(&images[i]);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}\n#else\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Image of AR.Drone's camera\n    IplImage *image = ardrone.getImage();\n\n    // Read intrinsic camera parameters\n    CvFileStorage *fs = cvOpenFileStorage(\"camera.xml\", 0, CV_STORAGE_READ);\n    CvMat *intrinsic = (CvMat*)cvRead(fs, cvGetFileNodeByName(fs, NULL, \"intrinsic\"));\n    CvMat *distortion = (CvMat*)cvRead(fs, cvGetFileNodeByName(fs, NULL, \"distortion\"));\n\n    // Initialize undistortion maps\n    CvMat *mapx = cvCreateMat(image->height, image->width, CV_32FC1);\n    CvMat *mapy = cvCreateMat(image->height, image->width, CV_32FC1);\n    cvInitUndistortMap(intrinsic, distortion, mapx, mapy);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(1);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        image = ardrone.getImage();\n\n        // Remap the image\n        cvRemap(image, image, mapx, mapy);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // Release the matrices\n    cvReleaseMat(&mapx);\n    cvReleaseMat(&mapy);\n    cvReleaseFileStorage(&fs);\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}\n#endif"
  },
  {
    "path": "samples/old/sample_condens_tracking.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n#include \"opencv2/legacy/legacy.hpp\"\n#include \"opencv2/legacy/compat.hpp\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Particle filter\n    CvConDensation *con = cvCreateConDensation(4, 0, 3000);\n\n    // Setup\n    CvMat *lowerBound = cvCreateMat(4, 1, CV_32FC1);\n    CvMat *upperBound = cvCreateMat(4, 1, CV_32FC1);\n    cvmSet(lowerBound, 0, 0, 0);\n    cvmSet(lowerBound, 1, 0, 0);\n    cvmSet(lowerBound, 2, 0, -10);\n    cvmSet(lowerBound, 3, 0, -10);\n    cvmSet(upperBound, 0, 0, ardrone.getImage()->width);\n    cvmSet(upperBound, 1, 0, ardrone.getImage()->height);\n    cvmSet(upperBound, 2, 0, 10);\n    cvmSet(upperBound, 3, 0, 10);\n\n    // Initialize particle filter\n    cvConDensInitSampleSet(con, lowerBound, upperBound);\n\n    // Linear system\n    con->DynamMatr[0]  = 1.0; con->DynamMatr[1]  = 0.0; con->DynamMatr[2]  = 1.0; con->DynamMatr[3]  = 0.0; \n    con->DynamMatr[4]  = 0.0; con->DynamMatr[5]  = 1.0; con->DynamMatr[6]  = 0.0; con->DynamMatr[7]  = 1.0; \n    con->DynamMatr[8]  = 0.0; con->DynamMatr[9]  = 0.0; con->DynamMatr[10] = 1.0; con->DynamMatr[11] = 0.0; \n    con->DynamMatr[12] = 0.0; con->DynamMatr[13] = 0.0; con->DynamMatr[14] = 0.0; con->DynamMatr[15] = 1.0; \n\n    // Noises\n    cvRandInit(&(con->RandS[0]), -25, 25, (int)cvGetTickCount());\n    cvRandInit(&(con->RandS[1]), -25, 25, (int)cvGetTickCount());\n    cvRandInit(&(con->RandS[2]),  -5,  5, (int)cvGetTickCount());\n    cvRandInit(&(con->RandS[3]),  -5,  5, (int)cvGetTickCount());\n\n    // Thresholds\n    int minH = 0, maxH = 255;\n    int minS = 0, maxS = 255;\n    int minV = 0, maxV = 255;\n\n    // Create a window\n    cvNamedWindow(\"binalized\");\n    cvCreateTrackbar(\"H max\", \"binalized\", &maxH, 255);\n    cvCreateTrackbar(\"H min\", \"binalized\", &minH, 255);\n    cvCreateTrackbar(\"S max\", \"binalized\", &maxS, 255);\n    cvCreateTrackbar(\"S min\", \"binalized\", &minS, 255);\n    cvCreateTrackbar(\"V max\", \"binalized\", &maxV, 255);\n    cvCreateTrackbar(\"V min\", \"binalized\", &minV, 255);\n    cvResizeWindow(\"binalized\", 0, 0);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(1);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // HSV image\n        IplImage *hsv = cvCloneImage(image);\n        cvCvtColor(image, hsv, CV_RGB2HSV_FULL);\n\n        // Binalized image\n        IplImage *binalized = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);\n\n        // Binalize\n        CvScalar lower = cvScalar(minH, minS, minV);\n        CvScalar upper = cvScalar(maxH, maxS, maxV);\n        cvInRangeS(hsv, lower, upper, binalized);\n\n        // Show result\n        cvShowImage(\"binalized\", binalized);\n\n        // De-noising\n        cvMorphologyEx(binalized, binalized, NULL, NULL, CV_MOP_CLOSE);\n \n        // Detect contours\n        CvSeq *contour = NULL, *maxContour = NULL;\n        CvMemStorage 
*contourStorage = cvCreateMemStorage();\n        cvFindContours(binalized, contourStorage, &contour, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);\n\n        // Find largest contour\n        double max_area = 0.0;\n        while (contour) {\n            double area = fabs(cvContourArea(contour));\n            if (area > max_area) {\n                maxContour = contour;\n                max_area = area;\n            }\n            contour = contour->h_next;\n        }\n\n        // Object detected\n        if (maxContour) {\n            // Draw a contour\n            cvZero(binalized);\n            cvDrawContours(binalized, maxContour, cvScalarAll(255), cvScalarAll(255), 0, CV_FILLED);\n\n            // Calculate the moments\n            CvMoments moments;\n            cvMoments(binalized, &moments, 1);\n            int my = (int)(moments.m01/moments.m00);\n            int mx = (int)(moments.m10/moments.m00);\n            cvCircle(image, cvPoint(mx, my), 10, CV_RGB(255,0,0));\n\n            // Calculate confidences\n            for (int i = 0; i < con->SamplesNum; i++) {\n                // Sample points\n                float x = (con->flSamples[i][0]);\n                float y = (con->flSamples[i][1]);\n\n                // Valid sample point\n                if (x > 0 && x < image->width && y > 0 && y < image->height) {\n                    // Assume a Gaussian distribution\n                    double sigma = 50.0;\n                    double dist = hypot(x - mx, y - my);    // Distance to moment\n                    con->flConfidence[i] = 1.0 / (sqrt(2.0 * CV_PI) * sigma) * exp(-dist*dist / (2.0 * sigma*sigma));\n                }\n                else con->flConfidence[i] = 0.0;\n                cvCircle(image, cvPointFrom32f(cvPoint2D32f(x, y)), 3, CV_RGB(0,128,con->flConfidence[i] * 50000));\n            }\n        }\n\n        // Update phase\n        cvConDensUpdateByTime(con);\n\n        // Sum of positions and confidences to calculate the weighted mean\n        double sumX = 0, sumY = 0, sumConf = 0;\n        for (int i = 0; i < con->SamplesNum; i++) {\n            sumX += con->flConfidence[i] * con->flSamples[i][0];\n            sumY += con->flConfidence[i] * con->flSamples[i][1];\n            sumConf += con->flConfidence[i];\n        }\n\n        // Estimated value\n        if (sumConf > 0.0) {\n            float x = sumX / sumConf;\n            float y = sumY / sumConf;\n            cvCircle(image, cvPointFrom32f(cvPoint2D32f(x, y)), 10, CV_RGB(0,255,0));\n        }\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n\n        // Release memories\n        cvReleaseImage(&hsv);\n        cvReleaseImage(&binalized);\n        cvReleaseMemStorage(&contourStorage);\n    }\n\n    // Release the particle filter\n    cvReleaseMat(&lowerBound);\n    cvReleaseMat(&upperBound);\n    cvReleaseConDensation(&con);\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_deadreckoning.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Battery\n    printf(\"Battery = %d%%\\n\", ardrone.getBatteryPercentage());\n\n    // Map\n    IplImage *map = cvCreateImage(cvSize(500, 500), IPL_DEPTH_8U, 3);\n    cvZero(map);\n\n    // Position matrix\n    cv::Mat P = cv::Mat::zeros(3, 1, CV_64FC1);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // Orientation\n        double roll  = ardrone.getRoll();\n        double pitch = ardrone.getPitch();\n        double yaw   = ardrone.getYaw();\n\n        // Velocity\n        double vx, vy, vz;\n        double velocity = ardrone.getVelocity(&vx, &vy, &vz);\n\n        // Rotation matrices\n        double _RX[] = {        1.0,       0.0,        0.0,\n                                0.0, cos(roll), -sin(roll),\n                                0.0, sin(roll),  cos(roll)};\n        double _RY[] = { cos(pitch),       0.0,  sin(pitch),\n                                0.0,       1.0,        0.0,\n                        -sin(pitch),       0.0,  cos(pitch)};\n        double _RZ[] = {   cos(yaw), -sin(yaw),        0.0,\n                           sin(yaw),  cos(yaw),        0.0,\n                                0.0,       0.0,        1.0};\n        cv::Mat RX(3, 3, CV_64FC1, _RX);\n        cv::Mat RY(3, 3, CV_64FC1, _RY);\n        cv::Mat RZ(3, 3, CV_64FC1, _RZ);\n\n        // Time\n        static int64 last = cv::getTickCount();\n        double dt = (cv::getTickCount() - last) / cv::getTickFrequency();\n        last = cv::getTickCount();\n\n        // Local movement\n        double _M[] = {vx * dt, vy * dt, vz * dt};\n        cv::Mat M(3, 1, CV_64FC1, _M);\n\n        // Dead reckoning\n        P = P + RZ * RY * RX * M;\n\n        // Position (x, y, z)\n        double pos[3] = {P.at<double>(0,0), P.at<double>(1,0), P.at<double>(2,0)};\n        printf(\"x = %3.2f, y = %3.2f, z = %3.2f\", pos[0], pos[1], pos[2]);\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Move\n        double x = 0.0, y = 0.0, z = 0.0, r = 0.0;\n        if (key == 0x260000) x =  1.0;\n        if (key == 0x280000) x = -1.0;\n        if (key == 0x250000) r =  1.0;\n        if (key == 0x270000) r = -1.0;\n        if (key == 'q')      z =  1.0;\n        if (key == 'a')      z = -1.0;\n        ardrone.move3D(x, y, z, r);\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode%4);\n\n        // Display the image\n        cvDrawCircle(map, cvPoint(-pos[1]*30.0 + map->width/2, -pos[0]*30.0 + map->height/2), 2, CV_RGB(255,0,0));\n        cvShowImage(\"map\", map);\n        cvShowImage(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n    
cvReleaseImage(&map);\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_default.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Battery\n    printf(\"Battery = %d%%\\n\", ardrone.getBatteryPercentage());\n\n    // Instructions\n    printf(\"***************************************\\n\");\n    printf(\"*       CV Drone sample program       *\\n\");\n    printf(\"*           - How to Play -           *\\n\");\n    printf(\"***************************************\\n\");\n    printf(\"*                                     *\\n\");\n    printf(\"* - Controls -                        *\\n\");\n    printf(\"*    'Space' -- Takeoff/Landing       *\\n\");\n    printf(\"*    'Up'    -- Move forward          *\\n\");\n    printf(\"*    'Down'  -- Move backward         *\\n\");\n    printf(\"*    'Left'  -- Turn left             *\\n\");\n    printf(\"*    'Right' -- Turn right            *\\n\");\n    printf(\"*    'Q'     -- Move upward           *\\n\");\n    printf(\"*    'A'     -- Move downward         *\\n\");\n    printf(\"*                                     *\\n\");\n    printf(\"* - Others -                          *\\n\");\n    printf(\"*    'C'     -- Change camera         *\\n\");\n    printf(\"*    'Esc'   -- Exit                  *\\n\");\n    printf(\"*                                     *\\n\");\n    printf(\"***************************************\\n\\n\");\n\n    while (1) {\n        // Key input\n        int key = cvWaitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Move\n        double vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;\n        if (key == 0x260000) vx =  1.0;\n        if (key == 0x280000) vx = -1.0;\n        if (key == 0x250000) vr =  1.0;\n        if (key == 0x270000) vr = -1.0;\n        if (key == 'q')      vz =  1.0;\n        if (key == 'a')      vz = -1.0;\n        ardrone.move3D(vx, vy, vz, vr);\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode%4);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_default2.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Battery\n    std::cout << \"Battery = \" << ardrone.getBatteryPercentage() << \"%\" << std::endl;\n\n    // Instructions\n    std::cout << \"***************************************\" << std::endl;\n    std::cout << \"*       CV Drone sample program       *\" << std::endl;\n    std::cout << \"*           - How to Play -           *\" << std::endl;\n    std::cout << \"***************************************\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"* - Controls -                        *\" << std::endl;\n    std::cout << \"*    'Space' -- Takeoff/Landing       *\" << std::endl;\n    std::cout << \"*    'Up'    -- Move forward          *\" << std::endl;\n    std::cout << \"*    'Down'  -- Move backward         *\" << std::endl;\n    std::cout << \"*    'Left'  -- Turn left             *\" << std::endl;\n    std::cout << \"*    'Right' -- Turn right            *\" << std::endl;\n    std::cout << \"*    'Q'     -- Move upward           *\" << std::endl;\n    std::cout << \"*    'A'     -- Move downward         *\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"* - Others -                          *\" << std::endl;\n    std::cout << \"*    'C'     -- Change camera         *\" << std::endl;\n    std::cout << \"*    'Esc'   -- Exit                  *\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"***************************************\\n\" << std::endl;\n\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Move\n        double vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;\n        if (key == 0x260000) vx =  1.0;\n        if (key == 0x280000) vx = -1.0;\n        if (key == 0x250000) vr =  1.0;\n        if (key == 0x270000) vr = -1.0;\n        if (key == 'q')      vz =  1.0;\n        if (key == 'a')      vz = -1.0;\n        ardrone.move3D(vx, vy, vz, vr);\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode%4);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_detection.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Thresholds\n    int minH = 0, maxH = 255;\n    int minS = 0, maxS = 255;\n    int minV = 0, maxV = 255;\n\n    // Create a window\n    cvNamedWindow(\"binalized\");\n    cvCreateTrackbar(\"H max\", \"binalized\", &maxH, 255);\n    cvCreateTrackbar(\"H min\", \"binalized\", &minH, 255);\n    cvCreateTrackbar(\"S max\", \"binalized\", &maxS, 255);\n    cvCreateTrackbar(\"S min\", \"binalized\", &minS, 255);\n    cvCreateTrackbar(\"V max\", \"binalized\", &maxV, 255);\n    cvCreateTrackbar(\"V min\", \"binalized\", &minV, 255);\n    cvResizeWindow(\"binalized\", 0, 0);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // HSV image\n        IplImage *hsv = cvCloneImage(image);\n        cvCvtColor(image, hsv, CV_RGB2HSV_FULL);\n\n        // Binalized image\n        IplImage *binalized = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);\n\n        // Binalize\n        CvScalar lower = cvScalar(minH, minS, minV);\n        CvScalar upper = cvScalar(maxH, maxS, maxV);\n        cvInRangeS(hsv, lower, upper, binalized);\n\n        // Show result\n        cvShowImage(\"binalized\", binalized);\n\n        // De-noising\n        cvMorphologyEx(binalized, binalized, NULL, NULL, CV_MOP_CLOSE);\n \n        // Detect contours\n        CvSeq *contour = NULL, *maxContour = NULL;\n        CvMemStorage *contourStorage = cvCreateMemStorage();\n        cvFindContours(binalized, contourStorage, &contour, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);\n\n        // Find largest contour\n        double max_area = 0.0;\n        while (contour) {\n            double area = fabs(cvContourArea(contour));\n            if (area > max_area) {\n                maxContour = contour;\n                max_area = area;\n            }\n            contour = contour->h_next;\n        }\n\n        // Object detected\n        if (maxContour) {\n            // Show result\n            CvRect rect = cvBoundingRect(maxContour);\n            CvPoint minPoint, maxPoint;\n            minPoint.x = rect.x;\n            minPoint.y = rect.y;\n            maxPoint.x = rect.x + rect.width;\n            maxPoint.y = rect.y + rect.height;\n            cvRectangle(image, minPoint, maxPoint, CV_RGB(0,255,0));\n        }\n\n        // Release memory\n        cvReleaseMemStorage(&contourStorage);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n\n        // Release images\n        cvReleaseImage(&hsv);\n        cvReleaseImage(&binalized);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_detection2.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Thresholds\n    int minH = 0, maxH = 255;\n    int minS = 0, maxS = 255;\n    int minV = 0, maxV = 255;\n\n    // Create a window\n    cv::namedWindow(\"binalized\");\n    cv::createTrackbar(\"H max\", \"binalized\", &maxH, 255);\n    cv::createTrackbar(\"H min\", \"binalized\", &minH, 255);\n    cv::createTrackbar(\"S max\", \"binalized\", &maxS, 255);\n    cv::createTrackbar(\"S min\", \"binalized\", &minS, 255);\n    cv::createTrackbar(\"V max\", \"binalized\", &maxV, 255);\n    cv::createTrackbar(\"V min\", \"binalized\", &minV, 255);\n    cv::resizeWindow(\"binalized\", 0, 0);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // HSV image\n        cv::Mat hsv;\n        cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV_FULL);\n\n        // Binalize\n        cv::Mat binalized;\n        cv::Scalar lower(minH, minS, minV);\n        cv::Scalar upper(maxH, maxS, maxV);\n        cv::inRange(hsv, lower, upper, binalized);\n\n        // Show result\n        cv::imshow(\"binalized\", binalized);\n\n        // De-noising\n        cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));\n        cv::morphologyEx(binalized, binalized, cv::MORPH_CLOSE, kernel);\n        //cv::imshow(\"morphologyEx\", binalized);\n\n        // Detect contours\n        std::vector<std::vector<cv::Point>> contours;\n        cv::findContours(binalized.clone(), contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);\n\n        // Find largest contour\n        int contour_index = -1;\n        double max_area = 0.0;\n        for (int i = 0; i < contours.size(); i++) {\n            double area = fabs(cv::contourArea(contours[i]));\n            if (area > max_area) {\n                contour_index = i;\n                max_area = area;\n            }\n        }\n\n        // Object detected\n        if (contour_index >= 0) {\n            // Show result\n            cv::Rect rect = cv::boundingRect(contours[contour_index]);\n            cv::rectangle(image, rect, cv::Scalar(0,255,0));\n            //cv::drawContours(image, contours, contour_index, cv::Scalar(0,255,0));\n        }\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_flight_animation.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Battery\n    printf(\"Battery = %d%%\\n\", ardrone.getBatteryPercentage());\n\n    // Instructions\n    printf(\"  Q - ARDRONE_ANIM_PHI_M30_DEG\\n\");\n    printf(\"  A - ARDRONE_ANIM_PHI_30_DEG\\n\");\n    printf(\"  Z - ARDRONE_ANIM_THETA_M30_DEG\\n\");\n    printf(\"  W - ARDRONE_ANIM_THETA_30_DEG\\n\");\n    printf(\"  S - ARDRONE_ANIM_THETA_20DEG_YAW_200DEG\\n\");\n    printf(\"  X - ARDRONE_ANIM_THETA_20DEG_YAW_M200DEG\\n\");\n    printf(\"  E - ARDRONE_ANIM_TURNAROUND\\n\");\n    printf(\"  D - ARDRONE_ANIM_TURNAROUND_GODOWN\\n\");\n    printf(\"  C - ARDRONE_ANIM_YAW_SHAKE\\n\");\n    printf(\"  R - ARDRONE_ANIM_YAW_DANCE\\n\");\n    printf(\"  F - ARDRONE_ANIM_PHI_DANCE\\n\");\n    printf(\"  V - ARDRONE_ANIM_THETA_DANCE\\n\");\n    printf(\"  T - ARDRONE_ANIM_VZ_DANCE\\n\");\n    printf(\"  G - ARDRONE_ANIM_WAVE\\n\");\n    printf(\"  B - ARDRONE_ANIM_PHI_THETA_MIXED\\n\");\n    printf(\"  Y - ARDRONE_ANIM_DOUBLE_PHI_THETA_MIXED\\n\");\n    printf(\"  H - ARDRONE_ANIM_FLIP_AHEAD\\n\");\n    printf(\"  N - ARDRONE_ANIM_FLIP_BEHIND\\n\");\n    printf(\"  U - ARDRONE_ANIM_FLIP_LEFT\\n\");\n    printf(\"  J - ARDRONE_ANIM_FLIP_RIGHT\\n\");\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Flight animations\n        if (key == 'q') ardrone.setAnimation(ARDRONE_ANIM_PHI_M30_DEG,             1000);\n        if (key == 'a') ardrone.setAnimation(ARDRONE_ANIM_PHI_30_DEG,              1000);\n        if (key == 'z') ardrone.setAnimation(ARDRONE_ANIM_THETA_M30_DEG,           1000);\n        if (key == 'w') ardrone.setAnimation(ARDRONE_ANIM_THETA_30_DEG,            1000);\n        if (key == 's') ardrone.setAnimation(ARDRONE_ANIM_THETA_20DEG_YAW_200DEG,  1000);\n        if (key == 'x') ardrone.setAnimation(ARDRONE_ANIM_THETA_20DEG_YAW_M200DEG, 1000);\n        if (key == 'e') ardrone.setAnimation(ARDRONE_ANIM_TURNAROUND,              5000);\n        if (key == 'd') ardrone.setAnimation(ARDRONE_ANIM_TURNAROUND_GODOWN,       5000);\n        if (key == 'c') ardrone.setAnimation(ARDRONE_ANIM_YAW_SHAKE,               2000);\n        if (key == 'r') ardrone.setAnimation(ARDRONE_ANIM_YAW_DANCE,               5000);\n        if (key == 'f') ardrone.setAnimation(ARDRONE_ANIM_PHI_DANCE,               5000);\n        if (key == 'v') ardrone.setAnimation(ARDRONE_ANIM_THETA_DANCE,             5000);\n        if (key == 't') ardrone.setAnimation(ARDRONE_ANIM_VZ_DANCE,                5000);\n        if (key == 'g') ardrone.setAnimation(ARDRONE_ANIM_WAVE,                    5000);\n        if (key == 'b') 
ardrone.setAnimation(ARDRONE_ANIM_PHI_THETA_MIXED,         5000);\n        if (key == 'y') ardrone.setAnimation(ARDRONE_ANIM_DOUBLE_PHI_THETA_MIXED,  5000);\n        if (key == 'h') ardrone.setAnimation(ARDRONE_ANIM_FLIP_AHEAD,                15);\n        if (key == 'n') ardrone.setAnimation(ARDRONE_ANIM_FLIP_BEHIND,               15);\n        if (key == 'u') ardrone.setAnimation(ARDRONE_ANIM_FLIP_LEFT,                 15);\n        if (key == 'j') ardrone.setAnimation(ARDRONE_ANIM_FLIP_RIGHT,                15);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_hog.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Initialize detector\n    cv::HOGDescriptor hog;\n    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(1);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        cv::Mat img = ardrone.getImage();\n\n        // Detect\n        std::vector<cv::Rect> found;\n        hog.detectMultiScale(img, found, 0, cv::Size(4,4), cv::Size(0, 0), 1.5, 2.0);\n\n        // Show bounding rect\n        std::vector<cv::Rect>::const_iterator it;\n        for (it = found.begin(); it != found.end(); ++it) {\n            cv::Rect r = *it;\n            cv::rectangle(img, r.tl(), r.br(), cv::Scalar(255,0,0), 2);\n        }\n\n        // Display the image\n        cv::imshow(\"hog\", img); \n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_kalman_tracking.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Kalman filter\n    CvKalman *kalman = cvCreateKalman(4, 2);\n\n    // Setup\n    cvSetIdentity(kalman->measurement_matrix, cvRealScalar(1.0));\n    cvSetIdentity(kalman->process_noise_cov, cvRealScalar(1e-5));\n    cvSetIdentity(kalman->measurement_noise_cov, cvRealScalar(0.1));\n    cvSetIdentity(kalman->error_cov_post, cvRealScalar(1.0));\n\n    // Linear system\n    kalman->DynamMatr[0]  = 1.0; kalman->DynamMatr[1]  = 0.0; kalman->DynamMatr[2]  = 1.0; kalman->DynamMatr[3]  = 0.0; \n    kalman->DynamMatr[4]  = 0.0; kalman->DynamMatr[5]  = 1.0; kalman->DynamMatr[6]  = 0.0; kalman->DynamMatr[7]  = 1.0; \n    kalman->DynamMatr[8]  = 0.0; kalman->DynamMatr[9]  = 0.0; kalman->DynamMatr[10] = 1.0; kalman->DynamMatr[11] = 0.0; \n    kalman->DynamMatr[12] = 0.0; kalman->DynamMatr[13] = 0.0; kalman->DynamMatr[14] = 0.0; kalman->DynamMatr[15] = 1.0; \n\n    // Thresholds\n    int minH = 0, maxH = 255;\n    int minS = 0, maxS = 255;\n    int minV = 0, maxV = 255;\n\n    // Create a window\n    cvNamedWindow(\"binalized\");\n    cvCreateTrackbar(\"H max\", \"binalized\", &maxH, 255);\n    cvCreateTrackbar(\"H min\", \"binalized\", &minH, 255);\n    cvCreateTrackbar(\"S max\", \"binalized\", &maxS, 255);\n    cvCreateTrackbar(\"S min\", \"binalized\", &minS, 255);\n    cvCreateTrackbar(\"V max\", \"binalized\", &maxV, 255);\n    cvCreateTrackbar(\"V min\", \"binalized\", &minV, 255);\n    cvResizeWindow(\"binalized\", 0, 0);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(1);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // HSV image\n        IplImage *hsv = cvCloneImage(image);\n        cvCvtColor(image, hsv, CV_RGB2HSV_FULL);\n\n        // Binalized image\n        IplImage *binalized = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);\n\n        // Binalize\n        CvScalar lower = cvScalar(minH, minS, minV);\n        CvScalar upper = cvScalar(maxH, maxS, maxV);\n        cvInRangeS(hsv, lower, upper, binalized);\n\n        // Show result\n        cvShowImage(\"binalized\", binalized);\n\n        // De-noising\n        cvMorphologyEx(binalized, binalized, NULL, NULL, CV_MOP_CLOSE);\n \n        // Detect contours\n        CvSeq *contour = NULL, *maxContour = NULL;\n        CvMemStorage *contourStorage = cvCreateMemStorage();\n        cvFindContours(binalized, contourStorage, &contour, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);\n\n        // Find largest contour\n        double max_area = 0.0;\n        while (contour) {\n            double area = fabs(cvContourArea(contour));\n            if ( area > max_area) {\n                maxContour = contour;\n                max_area = area;\n            }\n            contour = contour->h_next;\n        }\n\n        // Object detected\n        if (maxContour) {\n            // Draw a contour\n            
cvZero(binalized);\n            cvDrawContours(binalized, maxContour, cvScalarAll(255), cvScalarAll(255), 0, CV_FILLED);\n\n            // Calculate the moments\n            CvMoments moments;\n            cvMoments(binalized, &moments, 1);\n            int my = (int)(moments.m01/moments.m00);\n            int mx = (int)(moments.m10/moments.m00);\n\n            // Measurements\n            float m[] = {(float)mx, (float)my};\n            CvMat measurement = cvMat(2, 1, CV_32FC1, m);\n\n            // Correct phase\n            const CvMat *correction = cvKalmanCorrect(kalman, &measurement);\n        }\n\n        // Prediction phase\n        const CvMat *prediction = cvKalmanPredict(kalman);\n\n        // Display the image\n        cvCircle(image, cvPointFrom32f(cvPoint2D32f(prediction->data.fl[0], prediction->data.fl[1])), 10, CV_RGB(0,255,0));\n        cvShowImage(\"camera\", image);\n\n        // Release the memories\n        cvReleaseImage(&hsv);\n        cvReleaseImage(&binalized);\n        cvReleaseMemStorage(&contourStorage);\n    }\n\n    // Release the Kalman filter\n    cvReleaseKalman(&kalman);\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_led_animation.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Instructions\n    printf(\"  Q - BLINK_GREEN_RED\\n\");\n    printf(\"  A - BLINK_GREEN\\n\");\n    printf(\"  Z - BLINK_RED\\n\");\n    printf(\"  W - BLINK_ORANGE\\n\");\n    printf(\"  S - SNAKE_GREEN_RED\\n\");\n    printf(\"  X - FIRE\\n\");\n    printf(\"  E - STANDARD\\n\");\n    printf(\"  D - RED\\n\");\n    printf(\"  C - GREEN\\n\");\n    printf(\"  R - RED_SNAKE\\n\");\n    printf(\"  F - BLANK\\n\");\n    printf(\"  V - RIGHT_MISSILE\\n\");\n    printf(\"  T - LEFT_MISSILE\\n\");\n    printf(\"  G - DOUBLE_MISSILE\\n\");\n    printf(\"  B - FRONT_LEFT_GREEN_OTHERS_RED\\n\");\n    printf(\"  Y - FRONT_RIGHT_GREEN_OTHERS_RED\\n\");\n    printf(\"  H - REAR_RIGHT_GREEN_OTHERS_RED\\n\");\n    printf(\"  N - REAR_LEFT_GREEN_OTHERS_RED\\n\");\n    printf(\"  U - LEFT_GREEN_RIGHT_RED\\n\");\n    printf(\"  J - LEFT_RED_RIGHT_GREEN\\n\");\n    printf(\"  M - BLINK_STANDARD\\n\");\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(100);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // LED animations\n        if (key == 'q') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_GREEN_RED,              0.5, 5);\n        if (key == 'a') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_GREEN,                  0.5, 5);\n        if (key == 'z') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_RED,                    0.5, 5);\n        if (key == 'w') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_ORANGE,                 0.5, 5);\n        if (key == 's') ardrone.setLED(ARDRONE_LED_ANIM_SNAKE_GREEN_RED,              0.5, 5);\n        if (key == 'x') ardrone.setLED(ARDRONE_LED_ANIM_FIRE,                         0.5, 5);\n        if (key == 'e') ardrone.setLED(ARDRONE_LED_ANIM_STANDARD,                     0.5, 5);\n        if (key == 'd') ardrone.setLED(ARDRONE_LED_ANIM_RED,                          0.5, 5);\n        if (key == 'c') ardrone.setLED(ARDRONE_LED_ANIM_GREEN,                        0.5, 5);\n        if (key == 'r') ardrone.setLED(ARDRONE_LED_ANIM_RED_SNAKE,                    0.5, 5);\n        if (key == 'f') ardrone.setLED(ARDRONE_LED_ANIM_BLANK,                        0.5, 5);\n        if (key == 'v') ardrone.setLED(ARDRONE_LED_ANIM_RIGHT_MISSILE,                0.5, 5);\n        if (key == 't') ardrone.setLED(ARDRONE_LED_ANIM_LEFT_MISSILE,                 0.5, 5);\n        if (key == 'g') ardrone.setLED(ARDRONE_LED_ANIM_DOUBLE_MISSILE,               0.5, 5);\n        if (key == 'b') ardrone.setLED(ARDRONE_LED_ANIM_FRONT_LEFT_GREEN_OTHERS_RED,  0.5, 5);\n        if (key == 'y') ardrone.setLED(ARDRONE_LED_ANIM_FRONT_RIGHT_GREEN_OTHERS_RED, 0.5, 5);\n        if (key == 'h') ardrone.setLED(ARDRONE_LED_ANIM_REAR_RIGHT_GREEN_OTHERS_RED,  0.5, 5);\n        if (key == 'n') ardrone.setLED(ARDRONE_LED_ANIM_REAR_LEFT_GREEN_OTHERS_RED,   0.5, 5);\n        if (key == 'u') 
ardrone.setLED(ARDRONE_LED_ANIM_LEFT_GREEN_RIGHT_RED,         0.5, 5);\n        if (key == 'j') ardrone.setLED(ARDRONE_LED_ANIM_LEFT_RED_RIGHT_GREEN,         0.5, 5);\n        if (key == 'm') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_STANDARD,               0.5, 5);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_marker_based_ar.cpp",
    "content": "﻿// C++ STL\n#include <iostream>\n#include <fstream>\n\n// OpenCV\n#include <opencv2/opencv.hpp>\n\n// OpenGL\n#include <GL/glut.h>\n\n// AR.Drone\n#include \"ardrone/ardrone.h\"\n\n// Marker detector\n#include \".\\3rdparty\\packtpub\\MarkerDetector.hpp\"\n\n// Parameter for calibration pattern\n#define PAT_ROWS   (7)                  // Rows of pattern\n#define PAT_COLS   (10)                 // Columns of pattern\n#define CHESS_SIZE (24.0)               // Size of a pattern [mm]\n\n// Global variables\nARDrone ardrone;\ncv::Mat mapx, mapy;\nCameraCalibration calibration;\n\n// --------------------------------------------------------------------------\n// buildProjectionMatrix(Camera matrix, Screen width, Screen height)\n// Description  : Calculate projection matrix from camera and screen paremeters.\n// Return value : Projection matrix\n// --------------------------------------------------------------------------\nMatrix44 buildProjectionMatrix(Matrix33 cameraMatrix, int screen_width, int screen_height)\n{\n\tfloat d_near = 0.01;  // Near clipping distance\n\tfloat d_far = 100;    // Far clipping distance\n\n\t// Camera parameters\n\tfloat f_x = cameraMatrix.data[0]; // Focal length in x axis\n\tfloat f_y = cameraMatrix.data[4]; // Focal length in y axis (usually the same?)\n\tfloat c_x = cameraMatrix.data[2]; // Camera primary point x\n\tfloat c_y = cameraMatrix.data[5]; // Camera primary point y\n\n\tMatrix44 projectionMatrix;\n\tprojectionMatrix.data[0] = -2.0 * f_x / screen_width;\n\tprojectionMatrix.data[1] = 0.0;\n\tprojectionMatrix.data[2] = 0.0;\n\tprojectionMatrix.data[3] = 0.0;\n\n\tprojectionMatrix.data[4] = 0.0;\n\tprojectionMatrix.data[5] = 2.0 * f_y / screen_height;\n\tprojectionMatrix.data[6] = 0.0;\n\tprojectionMatrix.data[7] = 0.0;\n\n\tprojectionMatrix.data[8] = 2.0 * c_x / screen_width - 1.0;\n\tprojectionMatrix.data[9] = 2.0 * c_y / screen_height - 1.0;\n\tprojectionMatrix.data[10] = -(d_far + d_near) / (d_far - d_near);\n\tprojectionMatrix.data[11] = -1.0;\n\n\tprojectionMatrix.data[12] = 0.0;\n\tprojectionMatrix.data[13] = 0.0;\n\tprojectionMatrix.data[14] = -2.0 * d_far * d_near / (d_far - d_near);\n\tprojectionMatrix.data[15] = 0.0;\n\n\treturn projectionMatrix;\n}\n\n// --------------------------------------------------------------------------\n// idle()\n// Description  : Idle function.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid idle(void)\n{\n\t// Redisplay\n\tglutPostRedisplay();\n}\n\n// --------------------------------------------------------------------------\n// display()\n// Description  : Displaying function.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid display(void)\n{\n\t// Clear the buffers\n\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n\n\t// Get an image\n\tcv::Mat image_raw = ardrone.getImage();\n\tcv::Mat image;\n\tcv::remap(image_raw, image, mapx, mapy, cv::INTER_LINEAR);\n\n\t// Show the image\n\tcv::Mat rgb;\n\tcv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);\n\tcv::flip(rgb, rgb, 0);\n\tglDepthMask(GL_FALSE);\n\tglDrawPixels(rgb.cols, rgb.rows, GL_RGB, GL_UNSIGNED_BYTE, rgb.data);\n\n\t// Convert to BGRA\n\tcv::Mat bgra;\n\tcv::cvtColor(image, bgra, cv::COLOR_BGR2BGRA);\n\n\t// Prepare for marker detection\n\tBGRAVideoFrame frame;\n\tframe.width = bgra.cols;\n\tframe.height = bgra.rows;\n\tframe.data = bgra.data;\n\tframe.stride = bgra.step;\n\n\t// Detect marker(s)\n\tMarkerDetector 
detector(calibration);\n\tdetector.processFrame(frame);\n\tstd::vector<Transformation> transformations = detector.getTransformations();\n\n\t// Calculate projection matrix\n\tMatrix44 projectionMatrix = buildProjectionMatrix(calibration.getIntrinsic(), frame.width, frame.height);\n\n\t// Apply the projection matrix\n\tglMatrixMode(GL_PROJECTION);\n\tglLoadMatrixf(projectionMatrix.data);\n\n\t// Change to model view matrix mode\n\tglMatrixMode(GL_MODELVIEW);\n\tglLoadIdentity();\n\n\t// Enable depth mask\n\tglDepthMask(GL_TRUE);\n\n\t// Enable vertex array\n\tglEnableClientState(GL_VERTEX_ARRAY);\n\tglEnableClientState(GL_COLOR_ARRAY);\n\n\t// Push current model view matrix\n\tglPushMatrix();\n\n\t// Set line width\n\tglLineWidth(3.0f);\n\n\t// Vertex arrays\n\tfloat lineX[] = { 0, 0, 0, 1, 0, 0 };\n\tfloat lineY[] = { 0, 0, 0, 0, 1, 0 };\n\tfloat lineZ[] = { 0, 0, 0, 0, 0, 1 };\n\n\t// 2D plane\n\tconst GLfloat squareVertices[] = {-0.5f, -0.5f,\n\t\t\t\t\t\t\t\t\t   0.5f, -0.5f,\n\t\t\t\t\t\t\t\t\t  -0.5f,  0.5f,\n\t\t\t\t\t\t\t\t\t   0.5f,  0.5f};\n\n\t// 2D plane color (RGBA)\n\tconst GLubyte squareColors[] = {255, 255,   0, 255,\n\t\t\t\t\t\t\t\t      0, 255, 255, 255,\n\t\t\t\t\t\t\t\t\t  0,   0,   0,   0,\n\t\t\t\t\t\t\t\t\t255,   0, 255, 255};\n\n\t// Draw AR\n\tfor (size_t i = 0; i < transformations.size(); i++) {\n\t\t// Get transformation\n\t\tconst Transformation &transformation = transformations[i];\n\t\tMatrix44 glMatrix = transformation.getMat44();\n\n\t\t// Load it\n\t\tglLoadMatrixf(reinterpret_cast<const GLfloat*>(&glMatrix.data[0]));\n\n\t\t// Draw 2D plane\n\t\tglEnableClientState(GL_COLOR_ARRAY);\n\t\tglVertexPointer(2, GL_FLOAT, 0, squareVertices);\n\t\tglColorPointer(4, GL_UNSIGNED_BYTE, 0, squareColors);\n\t\tglDrawArrays(GL_TRIANGLE_STRIP, 0, 4);\n\t\tglDisableClientState(GL_COLOR_ARRAY);\n\n\t\t// Scale of coordinate axes\n\t\tfloat scale = 0.5;\n\t\tglScalef(scale, scale, scale);\n\n\t\t// Move it a little\n\t\tglTranslatef(0, 0, 0.1f);\n\n\t\t// X axis\n\t\tglColor4f(1.0f, 0.0f, 0.0f, 1.0f);\n\t\tglVertexPointer(3, GL_FLOAT, 0, lineX);\n\t\tglDrawArrays(GL_LINES, 0, 2);\n\n\t\t// Y axis\n\t\tglColor4f(0.0f, 1.0f, 0.0f, 1.0f);\n\t\tglVertexPointer(3, GL_FLOAT, 0, lineY);\n\t\tglDrawArrays(GL_LINES, 0, 2);\n\n\t\t// Z axis\n\t\tglColor4f(0.0f, 0.0f, 1.0f, 1.0f);\n\t\tglVertexPointer(3, GL_FLOAT, 0, lineZ);\n\t\tglDrawArrays(GL_LINES, 0, 2);\n\t}\n\n\t// Disable vertex array\n\tglDisableClientState(GL_VERTEX_ARRAY);\n\tglDisableClientState(GL_COLOR_ARRAY);\n\n\t// Pop the model view matrix\n\tglPopMatrix();\n\n\t// Swap the buffer\n\tglutSwapBuffers();\n}\n\n// --------------------------------------------------------------------------\n// key(Key pressed, X position of cursor, Y position of cursor)\n// Description  : Key input function.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid key(unsigned char key, int x, int y) {\n\tswitch (key) {\n\tcase 0x1b:\n\t\texit(1);\n\t\tbreak;\n\tdefault:\n\t\tbreak;\n\t}\n}\n\n// --------------------------------------------------------------------------\n// resize(Width of window, Height of window)\n// Description  : Resizing function.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid resize(int w, int h)\n{\n\t// Set viewport\n\tglViewport(0, 0, w, h);\n\n\t// Set projection matrix\n\tglMatrixMode(GL_PROJECTION);\n\tglLoadIdentity();\n\tgluPerspective(30.0, (double)w / (double)h, 0.01, 100.0);\n}\n\n// 
--------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // Initialize\n\tif (!ardrone.open()) {\n\t\tstd::cout << \"Failed to initialize.\" << std::endl;\n\t\treturn -1;\n\t}\n\n    // Images\n\tcv::Mat frame = ardrone.getImage();\n\n\t// Open XML file\n    std::string filename(\"camera_ardrone.xml\");\n    std::fstream file(filename.c_str(), std::ios::in);\n\n    // Not found\n    if (!file.is_open()) {\n        // Image buffer\n\t\tstd::vector<cv::Mat> images;\n\t\tstd::cout << \"Press the Space key to capture an image\" << std::endl;\n\t\tstd::cout << \"Press Esc to exit\" << std::endl;\n\n\t\t// Main loop\n\t\twhile (1) {\n\t\t\t// Key input\n\t\t\tint key = cv::waitKey(1);\n\t\t\tif (key == 0x1b) break;\n\n\t\t\t// Get an image\n\t\t\tframe = ardrone.getImage();\n\n\t\t\t// Convert to grayscale\n\t\t\tcv::Mat gray;\n\t\t\tcv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);\n\n\t\t\t// Detect a chessboard\n\t\t\tcv::Size size(PAT_COLS, PAT_ROWS);\n\t\t\tstd::vector<cv::Point2f> corners;\n\t\t\tbool found = cv::findChessboardCorners(gray, size, corners, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE | cv::CALIB_CB_FAST_CHECK);\n\n\t\t\t// Chessboard detected\n\t\t\tif (found) {\n\t\t\t\t// Draw it\n\t\t\t\tcv::drawChessboardCorners(frame, size, corners, found);\n\n\t\t\t\t// Space key was pressed\n\t\t\t\tif (key == ' ') {\n\t\t\t\t\t// Add to buffer\n\t\t\t\t\timages.push_back(gray);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Show the image\n\t\t\tstd::ostringstream stream;\n\t\t\tstream << \"Captured \" << images.size() << \" image(s).\";\n\t\t\tcv::putText(frame, stream.str(), cv::Point(10, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1, CV_AA);\n\t\t\tcv::imshow(\"Camera Calibration\", frame);\n\t\t}\n\n\t\t// We have enough samples\n\t\tif (images.size() > 4) {\n\t\t\tcv::Size size(PAT_COLS, PAT_ROWS);\n\t\t\tstd::vector< std::vector<cv::Point2f> > corners2D;\n\t\t\tstd::vector< std::vector<cv::Point3f> > corners3D;\n\n\t\t\tfor (size_t i = 0; i < images.size(); i++) {\n\t\t\t\t// Detect a chessboard\n\t\t\t\tstd::vector<cv::Point2f> tmp_corners2D;\n\t\t\t\tbool found = cv::findChessboardCorners(images[i], size, tmp_corners2D);\n\n\t\t\t\t// Chessboard detected\n\t\t\t\tif (found) {\n\t\t\t\t\t// Convert the corners to sub-pixel\n\t\t\t\t\tcv::cornerSubPix(images[i], tmp_corners2D, cvSize(11, 11), cvSize(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 30, 0.1));\n\t\t\t\t\tcorners2D.push_back(tmp_corners2D);\n\n\t\t\t\t\t// Set the 3D position of patterns\n\t\t\t\t\tconst float squareSize = CHESS_SIZE;\n\t\t\t\t\tstd::vector<cv::Point3f> tmp_corners3D;\n\t\t\t\t\tfor (int j = 0; j < size.height; j++) {\n\t\t\t\t\t\tfor (int k = 0; k < size.width; k++) {\n\t\t\t\t\t\t\ttmp_corners3D.push_back(cv::Point3f((float)(k*squareSize), (float)(j*squareSize), 0.0));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcorners3D.push_back(tmp_corners3D);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Estimate camera parameters\n\t\t\tcv::Mat cameraMatrix, distCoeffs;\n\t\t\tstd::vector<cv::Mat> rvec, tvec;\n\t\t\tcv::calibrateCamera(corners3D, corners2D, images[0].size(), cameraMatrix, distCoeffs, rvec, tvec, CV_CALIB_FIX_PRINCIPAL_POINT);\n\t\t\tstd::cout << cameraMatrix << std::endl;\n\t\t\tstd::cout << 
distCoeffs << std::endl;\n\n\t\t\t// Save them\n\t\t\tcv::FileStorage fs(filename, cv::FileStorage::WRITE);\n\t\t\tfs << \"intrinsic\" << cameraMatrix;\n\t\t\tfs << \"distortion\" << distCoeffs;\n\t\t}\n\n\t\t// Destroy windows\n\t\tcv::destroyAllWindows();\n\t}\n\n\t// Open XML file\n\tcv::FileStorage rfs(filename, cv::FileStorage::READ);\n\tif (!rfs.isOpened()) {\n\t\tstd::cout << \"Failed to open the XML file\" << std::endl;\n\t\treturn -1;\n\t}\n\n\t// Load camera parameters\n\tcv::Mat cameraMatrix, distCoeffs;\n\trfs[\"intrinsic\"] >> cameraMatrix;\n\trfs[\"distortion\"] >> distCoeffs;\n\n\t// Create undistort map\n\tcv::initUndistortRectifyMap(cameraMatrix, distCoeffs, cv::Mat(), cameraMatrix, frame.size(), CV_32FC1, mapx, mapy);\n\n\t// Set camera parameters\n\tfloat fx = cameraMatrix.at<double>(0, 0);\n\tfloat fy = cameraMatrix.at<double>(1, 1);\n\tfloat cx = cameraMatrix.at<double>(0, 2);\n\tfloat cy = cameraMatrix.at<double>(1, 2);\n\tcalibration = CameraCalibration(fx, fy, cx, cy);\n\n\t// Initialize GLUT\n\tglutInit(&argc, argv);\n\tglutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);\n\tglutInitWindowSize(frame.cols, frame.rows);\n\tglutCreateWindow(\"Mastering OpenCV with Practical Computer Vision Project\");\n\tglutDisplayFunc(display);\n\tglutKeyboardFunc(key);\n\tglutIdleFunc(idle);\n\n\t// Clear scene\n\tglClearColor(0.0, 0.0, 1.0, 1.0);\n\tglEnable(GL_DEPTH_TEST);\n\n\t// Start main loop\n\tglutMainLoop();\n\n\treturn 0;\n}"
  },
  {
    "path": "samples/old/sample_minimal.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone(\"192.168.1.1\");\n\n    // Main loop\n    while (1) {\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n\n        // Press Esc to exit\n        if (cvWaitKey(1) == 0x1b) break;\n    }\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_navdata.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // Orientation\n        double roll  = ardrone.getRoll();\n        double pitch = ardrone.getPitch();\n        double yaw   = ardrone.getYaw();\n        printf(\"ardrone.roll  = %3.2f [deg]\\n\", roll  * RAD_TO_DEG);\n        printf(\"ardrone.pitch = %3.2f [deg]\\n\", pitch * RAD_TO_DEG);\n        printf(\"ardrone.yaw   = %3.2f [deg]\\n\", yaw   * RAD_TO_DEG);\n\n        // Altitude\n        double altitude = ardrone.getAltitude();\n        printf(\"ardrone.altitude = %3.2f [m]\\n\", altitude);\n\n        // Velocity\n        double vx, vy, vz;\n        double velocity = ardrone.getVelocity(&vx, &vy, &vz);\n        printf(\"ardrone.vx = %3.2f [m/s]\\n\", vx);\n        printf(\"ardrone.vy = %3.2f [m/s]\\n\", vy);\n        printf(\"ardrone.vz = %3.2f [m/s]\\n\", vz);\n\n        // Battery\n        int battery = ardrone.getBatteryPercentage();\n        printf(\"ardrone.battery = %d [%%]\\n\", battery);\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Move\n        double x = 0.0, y = 0.0, z = 0.0, r = 0.0;\n        if (key == 0x260000) x =  1.0;\n        if (key == 0x280000) x = -1.0;\n        if (key == 0x250000) r =  1.0;\n        if (key == 0x270000) r = -1.0;\n        ardrone.move3D(x, y, z, r);\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode%4);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_optical_flow.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Image of AR.Drone's camera\n    IplImage *image = ardrone.getImage();\n\n    // Variables for optical flow\n    int corner_count = 50;\n    IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);\n    IplImage *prev = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);\n    cvCvtColor(image, prev, CV_BGR2GRAY);\n    IplImage *eig_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);\n    IplImage *tmp_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);\n    IplImage *prev_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1);\n    IplImage *curr_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1);\n    CvPoint2D32f *corners1 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f));\n    CvPoint2D32f *corners2 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f));\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(1);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        image = ardrone.getImage();\n\n        // Convert the camera image to grayscale\n        cvCvtColor(image, gray, CV_BGR2GRAY);\n\n        // Detect features\n        int corner_count = 50;\n        cvGoodFeaturesToTrack(prev, eig_img, tmp_img, corners1, &corner_count, 0.1, 5.0, NULL);\n\n        // Corner detected\n        if (corner_count > 0) {\n            char *status = (char*)malloc(corner_count * sizeof(char));\n\n            // Calicurate optical flows\n            CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.3);\n            cvCalcOpticalFlowPyrLK(prev, gray, prev_pyramid, curr_pyramid, corners1, corners2, corner_count, cvSize(10, 10), 3, status, NULL, criteria, 0);\n\n            // Drow the optical flows\n            for (int i = 0; i < corner_count; i++) {\n                cvCircle(image, cvPointFrom32f(corners1[i]), 1, CV_RGB (255, 0, 0));\n                if (status[i]) cvLine(image, cvPointFrom32f(corners1[i]), cvPointFrom32f(corners2[i]), CV_RGB (0, 0, 255), 1, CV_AA, 0);\n            }\n\n            // Release the memory\n            free(status);\n        }\n\n        // Save the last frame\n        cvCopy(gray, prev);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // Release the images\n    cvReleaseImage(&gray);\n    cvReleaseImage(&prev);\n    cvReleaseImage(&eig_img);\n    cvReleaseImage(&tmp_img);\n    cvReleaseImage(&prev_pyramid);\n    cvReleaseImage(&curr_pyramid);\n    free(corners1);\n    free(corners2);\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_video_record.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Recording flag\n    bool rec = false;\n    printf(\"Press 'R' to start/stop recording.\");\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(1);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Video recording start / stop\n        if (key == 'r') {\n            rec = !rec;\n            ardrone.setVideoRecord(rec);\n        }\n\n        // Get an image\n        IplImage *image = ardrone.getImage();\n\n        // Show recording state\n        if (rec) {\n            static CvFont font = cvFont(1.0);\n            cvPutText(image, \"REC\", cvPoint(10, 20), &font, CV_RGB(255,0,0));\n        }\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/old/sample_video_writer.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char **argv)\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        printf(\"Failed to initialize.\\n\");\n        return -1;\n    }\n\n    // Image of AR.Drone's camera\n    IplImage *image = ardrone.getImage();\n\n    // Name of video\n    char filename[256];\n    SYSTEMTIME st;\n    GetLocalTime(&st);\n    sprintf(filename, \"cam%d%02d%02d%02d%02d%02d.avi\", st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);\n\n    // Create a video writer\n    CvVideoWriter *video = cvCreateVideoWriter(filename, CV_FOURCC('D','I','B',' '), 30, cvGetSize(image));\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cvWaitKey(33);\n        if (key == 0x1b) break;\n\n        // Update\n        if (!ardrone.update()) break;\n\n        // Get an image\n        image = ardrone.getImage();\n\n        // Write a frame\n        cvWriteFrame(video, image);\n\n        // Display the image\n        cvShowImage(\"camera\", image);\n    }\n\n    // Save the video\n    cvReleaseVideoWriter(&video);\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_camera_calibration.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// Parameter for calibration pattern\n#define PAT_ROWS   (7)                  // Rows of pattern\n#define PAT_COLS   (10)                 // Columns of pattern\n#define CHESS_SIZE (24.0)               // Size of a pattern [mm]\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Images\n    cv::Mat frame = ardrone.getImage();\n\n    // Open XML file\n    std::string filename(\"camera.xml\");\n    cv::FileStorage fs(filename, cv::FileStorage::READ);\n\n    // Not found\n    if (!fs.isOpened()) {\n        // Image buffer\n        std::vector<cv::Mat> images;\n        std::cout << \"Press Space key to capture an image\" << std::endl;\n        std::cout << \"Press Esc to exit\" << std::endl;\n\n        // Calibration loop\n        while (1) {\n            // Key iput\n            int key = cv::waitKey(1);\n            if (key == 0x1b) break;\n\n            // Get an image\n            frame = ardrone.getImage();\n\n            // Convert to grayscale\n            cv::Mat gray;\n            cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);\n\n            // Detect a chessboard\n            cv::Size size(PAT_COLS, PAT_ROWS);\n            std::vector<cv::Point2f> corners;\n            bool found = cv::findChessboardCorners(gray, size, corners, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE | cv::CALIB_CB_FAST_CHECK);\n\n            // Chessboard detected\n            if (found) {\n                // Draw it\n                cv::drawChessboardCorners(frame, size, corners, found);\n\n                // Space key was pressed\n                if (key == ' ') {\n                    // Add to buffer\n                    images.push_back(gray);\n                }\n            }\n\n            // Show the image\n            std::ostringstream stream;\n            stream << \"Captured \" << images.size() << \" image(s).\";\n            cv::putText(frame, stream.str(), cv::Point(10, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1, cv::LINE_AA);\n            cv::imshow(\"Camera Calibration\", frame);\n        }\n\n        // We have enough samples\n        if (images.size() > 4) {\n            cv::Size size(PAT_COLS, PAT_ROWS);\n            std::vector< std::vector<cv::Point2f> > corners2D;\n            std::vector< std::vector<cv::Point3f> > corners3D;\n\n            for (size_t i = 0; i < images.size(); i++) {\n                // Detect a chessboard\n                std::vector<cv::Point2f> tmp_corners2D;\n                bool found = cv::findChessboardCorners(images[i], size, tmp_corners2D);\n\n                // Chessboard detected\n                if (found) {\n                    // Convert the corners to sub-pixel\n                    cv::cornerSubPix(images[i], tmp_corners2D, cvSize(11, 11), cvSize(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 30, 0.1));\n                    corners2D.push_back(tmp_corners2D);\n\n                    // Set the 3D position of patterns\n                    const float squareSize = CHESS_SIZE;\n  
                  std::vector<cv::Point3f> tmp_corners3D;\n                    for (int j = 0; j < size.height; j++) {\n                        for (int k = 0; k < size.width; k++) {\n                            tmp_corners3D.push_back(cv::Point3f((float)(k*squareSize), (float)(j*squareSize), 0.0));\n                        }\n                    }\n                    corners3D.push_back(tmp_corners3D);\n                }\n            }\n\n            // Estimate camera parameters\n            cv::Mat cameraMatrix, distCoeffs;\n            std::vector<cv::Mat> rvec, tvec;\n            cv::calibrateCamera(corners3D, corners2D, images[0].size(), cameraMatrix, distCoeffs, rvec, tvec);\n            std::cout << cameraMatrix << std::endl;\n            std::cout << distCoeffs << std::endl;\n\n            // Save them\n            cv::FileStorage tmp(filename, cv::FileStorage::WRITE);\n            tmp << \"intrinsic\" << cameraMatrix;\n            tmp << \"distortion\" << distCoeffs;\n            tmp.release();\n\n            // Reload\n            fs.open(filename, cv::FileStorage::READ);\n        }\n\n        // Destroy windows\n        cv::destroyAllWindows();\n    }\n\n    // Load camera parameters\n    cv::Mat cameraMatrix, distCoeffs;\n    fs[\"intrinsic\"] >> cameraMatrix;\n    fs[\"distortion\"] >> distCoeffs;\n\n    // Create undistort map\n    cv::Mat mapx, mapy;\n    cv::initUndistortRectifyMap(cameraMatrix, distCoeffs, cv::Mat(), cameraMatrix, frame.size(), CV_32FC1, mapx, mapy);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image_raw = ardrone.getImage();\n\n        // Undistort\n        cv::Mat image;\n        cv::remap(image_raw, image, mapx, mapy, cv::INTER_LINEAR);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_deadreckoning.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Battery\n    std::cout << \"Battery = \" << ardrone.getBatteryPercentage() << \" [%]\" << std::endl;\n\n    // Map\n    cv::Mat map = cv::Mat::zeros(500, 500, CV_8UC3);\n\n    // Position matrix\n    cv::Mat P = cv::Mat::zeros(3, 1, CV_64FC1);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Altitude\n        double altitude = ardrone.getAltitude();\n\n        // Orientations\n        double roll = ardrone.getRoll();\n        double pitch = ardrone.getPitch();\n        double yaw = ardrone.getYaw();\n\n        // Velocities\n        double vx, vy, vz;\n        double velocity = ardrone.getVelocity(&vx, &vy, &vz);\n        cv::Mat V = (cv::Mat1f(3, 1) << vx, vy, vz);\n\n        // Rotation matrices\n        cv::Mat RZ = (cv::Mat1f(3, 3) << cos(yaw), -sin(yaw), 0.0,\n                                         sin(yaw),  cos(yaw), 0.0,\n                                              0.0,       0.0, 1.0);\n        cv::Mat RY = (cv::Mat1f(3, 3) << cos(pitch), 0.0, sin(pitch),\n                                                0.0, 1.0,        0.0,\n                                        -sin(pitch), 0.0, cos(pitch));\n        cv::Mat RX = (cv::Mat1f(3, 3) << 1.0,       0.0,        0.0,\n                                         0.0, cos(roll), -sin(roll),\n                                         0.0, sin(roll),  cos(roll));\n\n        // Time [s]\n        static int64 last = cv::getTickCount();\n        double dt = (cv::getTickCount() - last) / cv::getTickFrequency();\n        last = cv::getTickCount();\n\n        // Dead-reckoning\n        P = P + RZ * RY * RX * V * dt;\n\n        // Position (x, y, z)\n        double pos[3] = { P.at<double>(0, 0), P.at<double>(1, 0), P.at<double>(2, 0) };\n        std::cout << \"x = \" << pos[0] << \"[m], \" << \"y = \" << pos[1] << \"[m], \" << \"z = \" << pos[2] << \"[m]\" << std::endl;\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Move\n        double x = 0.0, y = 0.0, z = 0.0, r = 0.0;\n        if (key == 'i' || key == CV_VK_UP)    vx =  1.0;\n        if (key == 'k' || key == CV_VK_DOWN)  vx = -1.0;\n        if (key == 'u' || key == CV_VK_LEFT)  vr =  1.0;\n        if (key == 'o' || key == CV_VK_RIGHT) vr = -1.0;\n        if (key == 'j') vy =  1.0;\n        if (key == 'l') vy = -1.0;\n        if (key == 'q') vz =  1.0;\n        if (key == 'a') vz = -1.0;\n        ardrone.move3D(x, y, z, r);\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode % 4);\n\n        // Display the image\n        cv::circle(map, cv::Point(-pos[1] * 100.0 + map.cols / 2, -pos[0] * 100.0 + map.rows / 2), 2, CV_RGB(255, 0, 0));\n        
cv::imshow(\"map\", map);\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_default.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Battery\n    std::cout << \"Battery = \" << ardrone.getBatteryPercentage() << \"[%]\" << std::endl;\n\n    // Instructions\n    std::cout << \"***************************************\" << std::endl;\n    std::cout << \"*       CV Drone sample program       *\" << std::endl;\n    std::cout << \"*           - How to play -           *\" << std::endl;\n    std::cout << \"***************************************\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"* - Controls -                        *\" << std::endl;\n    std::cout << \"*    'Space' -- Takeoff/Landing       *\" << std::endl;\n    std::cout << \"*    'Up'    -- Move forward          *\" << std::endl;\n    std::cout << \"*    'Down'  -- Move backward         *\" << std::endl;\n    std::cout << \"*    'Left'  -- Turn left             *\" << std::endl;\n    std::cout << \"*    'Right' -- Turn right            *\" << std::endl;\n    std::cout << \"*    'Q'     -- Move upward           *\" << std::endl;\n    std::cout << \"*    'A'     -- Move downward         *\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"* - Others -                          *\" << std::endl;\n    std::cout << \"*    'C'     -- Change camera         *\" << std::endl;\n    std::cout << \"*    'Esc'   -- Exit                  *\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"***************************************\" << std::endl;\n\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Move\n        double vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;\n        if (key == 'i' || key == CV_VK_UP)    vx =  1.0;\n        if (key == 'k' || key == CV_VK_DOWN)  vx = -1.0;\n        if (key == 'u' || key == CV_VK_LEFT)  vr =  1.0;\n        if (key == 'o' || key == CV_VK_RIGHT) vr = -1.0;\n        if (key == 'j') vy =  1.0;\n        if (key == 'l') vy = -1.0;\n        if (key == 'q') vz =  1.0;\n        if (key == 'a') vz = -1.0;\n        ardrone.move3D(vx, vy, vz, vr);\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode % 4);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_detection.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Thresholds\n    int minH = 0, maxH = 255;\n    int minS = 0, maxS = 255;\n    int minV = 0, maxV = 255;\n\n    // XML save data\n    std::string filename(\"thresholds.xml\");\n    cv::FileStorage fs(filename, cv::FileStorage::READ);\n\n    // If there is a save file then read it\n    if (fs.isOpened()) {\n        maxH = fs[\"H_MAX\"];\n        minH = fs[\"H_MIN\"];\n        maxS = fs[\"S_MAX\"];\n        minS = fs[\"S_MIN\"];\n        maxV = fs[\"V_MAX\"];\n        minV = fs[\"V_MIN\"];\n        fs.release();\n    }\n\n    // Create a window\n    cv::namedWindow(\"binalized\");\n    cv::createTrackbar(\"H max\", \"binalized\", &maxH, 255);\n    cv::createTrackbar(\"H min\", \"binalized\", &minH, 255);\n    cv::createTrackbar(\"S max\", \"binalized\", &maxS, 255);\n    cv::createTrackbar(\"S min\", \"binalized\", &minS, 255);\n    cv::createTrackbar(\"V max\", \"binalized\", &maxV, 255);\n    cv::createTrackbar(\"V min\", \"binalized\", &minV, 255);\n    cv::resizeWindow(\"binalized\", 0, 0);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // HSV image\n        cv::Mat hsv;\n        cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV_FULL);\n\n        // Binalize\n        cv::Mat binalized;\n        cv::Scalar lower(minH, minS, minV);\n        cv::Scalar upper(maxH, maxS, maxV);\n        cv::inRange(hsv, lower, upper, binalized);\n\n        // Show result\n        cv::imshow(\"binalized\", binalized);\n\n        // De-noising\n        cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));\n        cv::morphologyEx(binalized, binalized, cv::MORPH_CLOSE, kernel);\n        //cv::imshow(\"morphologyEx\", binalized);\n\n        // Detect contours\n        std::vector< std::vector<cv::Point> > contours;\n        cv::findContours(binalized.clone(), contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);\n\n        // Find largest contour\n        int contour_index = -1;\n        double max_area = 0.0;\n        for (size_t i = 0; i < contours.size(); i++) {\n            double area = fabs(cv::contourArea(contours[i]));\n            if (area > max_area) {\n                contour_index = i;\n                max_area = area;\n            }\n        }\n\n        // Object detected\n        if (contour_index >= 0) {\n            // Show result\n            cv::Rect rect = cv::boundingRect(contours[contour_index]);\n            cv::rectangle(image, rect, cv::Scalar(0,255,0));\n            //cv::drawContours(image, contours, contour_index, cv::Scalar(0,255,0));\n        }\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // Save thresholds\n    fs.open(filename, cv::FileStorage::WRITE);\n    if (fs.isOpened()) {\n        cv::write(fs, \"H_MAX\", maxH);\n        cv::write(fs, \"H_MIN\", minH);\n        cv::write(fs, \"S_MAX\", maxS);\n 
       cv::write(fs, \"S_MIN\", minS);\n        cv::write(fs, \"V_MAX\", maxV);\n        cv::write(fs, \"V_MIN\", minV);\n        fs.release();\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_flight_animation.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Battery\n    std::cout << \"Battery = \" << ardrone.getBatteryPercentage() << \"%\" << std::endl;\n\n    // Instructions\n    std::cout << \"  Q - ARDRONE_ANIM_PHI_M30_DEG            \" << std::endl;\n    std::cout << \"  A - ARDRONE_ANIM_PHI_30_DEG             \" << std::endl;\n    std::cout << \"  Z - ARDRONE_ANIM_THETA_M30_DEG          \" << std::endl;\n    std::cout << \"  W - ARDRONE_ANIM_THETA_30_DEG           \" << std::endl;\n    std::cout << \"  S - ARDRONE_ANIM_THETA_20DEG_YAW_200DEG \" << std::endl;\n    std::cout << \"  X - ARDRONE_ANIM_THETA_20DEG_YAW_M200DEG\" << std::endl;\n    std::cout << \"  E - ARDRONE_ANIM_TURNAROUND             \" << std::endl;\n    std::cout << \"  D - ARDRONE_ANIM_TURNAROUND_GODOWN      \" << std::endl;\n    std::cout << \"  C - ARDRONE_ANIM_YAW_SHAKE              \" << std::endl;\n    std::cout << \"  R - ARDRONE_ANIM_YAW_DANCE              \" << std::endl;\n    std::cout << \"  F - ARDRONE_ANIM_PHI_DANCE              \" << std::endl;\n    std::cout << \"  V - ARDRONE_ANIM_THETA_DANCE            \" << std::endl;\n    std::cout << \"  T - ARDRONE_ANIM_VZ_DANCE               \" << std::endl;\n    std::cout << \"  G - ARDRONE_ANIM_WAVE                   \" << std::endl;\n    std::cout << \"  B - ARDRONE_ANIM_PHI_THETA_MIXED        \" << std::endl;\n    std::cout << \"  Y - ARDRONE_ANIM_DOUBLE_PHI_THETA_MIXED \" << std::endl;\n    std::cout << \"  H - ARDRONE_ANIM_FLIP_AHEAD             \" << std::endl;\n    std::cout << \"  N - ARDRONE_ANIM_FLIP_BEHIND            \" << std::endl;\n    std::cout << \"  U - ARDRONE_ANIM_FLIP_LEFT              \" << std::endl;\n    std::cout << \"  J - ARDRONE_ANIM_FLIP_RIGHT             \" << std::endl;\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Flight animations\n        if (key == 'q') ardrone.setAnimation(ARDRONE_ANIM_PHI_M30_DEG);\n        if (key == 'a') ardrone.setAnimation(ARDRONE_ANIM_PHI_30_DEG);\n        if (key == 'z') ardrone.setAnimation(ARDRONE_ANIM_THETA_M30_DEG);\n        if (key == 'w') ardrone.setAnimation(ARDRONE_ANIM_THETA_30_DEG);\n        if (key == 's') ardrone.setAnimation(ARDRONE_ANIM_THETA_20DEG_YAW_200DEG);\n        if (key == 'x') ardrone.setAnimation(ARDRONE_ANIM_THETA_20DEG_YAW_M200DEG);\n        if (key == 'e') ardrone.setAnimation(ARDRONE_ANIM_TURNAROUND);\n        if (key == 'd') ardrone.setAnimation(ARDRONE_ANIM_TURNAROUND_GODOWN);\n        if (key == 'c') ardrone.setAnimation(ARDRONE_ANIM_YAW_SHAKE);\n        if (key == 'r') ardrone.setAnimation(ARDRONE_ANIM_YAW_DANCE);\n        if (key == 'f') ardrone.setAnimation(ARDRONE_ANIM_PHI_DANCE);\n        if (key == 'v') 
ardrone.setAnimation(ARDRONE_ANIM_THETA_DANCE);\n        if (key == 't') ardrone.setAnimation(ARDRONE_ANIM_VZ_DANCE);\n        if (key == 'g') ardrone.setAnimation(ARDRONE_ANIM_WAVE);\n        if (key == 'b') ardrone.setAnimation(ARDRONE_ANIM_PHI_THETA_MIXED);\n        if (key == 'y') ardrone.setAnimation(ARDRONE_ANIM_DOUBLE_PHI_THETA_MIXED);\n        if (key == 'h') ardrone.setAnimation(ARDRONE_ANIM_FLIP_AHEAD);\n        if (key == 'n') ardrone.setAnimation(ARDRONE_ANIM_FLIP_BEHIND);\n        if (key == 'u') ardrone.setAnimation(ARDRONE_ANIM_FLIP_LEFT);\n        if (key == 'j') ardrone.setAnimation(ARDRONE_ANIM_FLIP_RIGHT);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_hog.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Initialize detector\n    cv::HOGDescriptor hog;\n    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(1);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Detect\n        std::vector<cv::Rect> found;\n        hog.detectMultiScale(image, found, 0, cv::Size(4, 4), cv::Size(0, 0), 1.5, 2.0);\n\n        // Show bounding rect\n        std::vector<cv::Rect>::const_iterator it;\n        for (it = found.begin(); it != found.end(); ++it) {\n            cv::Rect r = *it;\n            cv::rectangle(image, r.tl(), r.br(), cv::Scalar(255, 0, 0), 2);\n        }\n\n        // Display the image\n        cv::imshow(\"hog\", image); \n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_kalman_tracking.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Thresholds\n    int minH = 0, maxH = 255;\n    int minS = 0, maxS = 255;\n    int minV = 0, maxV = 255;\n\n    // XML save data\n    std::string filename(\"thresholds.xml\");\n    cv::FileStorage fs(filename, cv::FileStorage::READ);\n\n    // If there is a save file then read it\n    if (fs.isOpened()) {\n        maxH = fs[\"H_MAX\"];\n        minH = fs[\"H_MIN\"];\n        maxS = fs[\"S_MAX\"];\n        minS = fs[\"S_MIN\"];\n        maxV = fs[\"V_MAX\"];\n        minV = fs[\"V_MIN\"];\n        fs.release();\n    }\n\n    // Create a window\n    cv::namedWindow(\"binalized\");\n    cv::createTrackbar(\"H max\", \"binalized\", &maxH, 255);\n    cv::createTrackbar(\"H min\", \"binalized\", &minH, 255);\n    cv::createTrackbar(\"S max\", \"binalized\", &maxS, 255);\n    cv::createTrackbar(\"S min\", \"binalized\", &minS, 255);\n    cv::createTrackbar(\"V max\", \"binalized\", &maxV, 255);\n    cv::createTrackbar(\"V min\", \"binalized\", &minV, 255);\n    cv::resizeWindow(\"binalized\", 0, 0);\n\n    // Kalman filter\n    cv::KalmanFilter kalman(4, 2, 0);\n\n    // Sampling time [s]\n    const double dt = 1.0;\n\n    // Transition matrix (x, y, vx, vy)\n    cv::Mat1f A(4, 4);\n    A << 1.0, 0.0,  dt, 0.0,\n         0.0, 1.0, 0.0,  dt,\n         0.0, 0.0, 1.0, 0.0,\n         0.0, 0.0, 0.0, 1.0;\n    kalman.transitionMatrix = A;\n\n    // Measurement matrix (x, y)\n    cv::Mat1f H(2, 4);\n    H << 1, 0, 0, 0,\n         0, 1, 0, 0;\n    kalman.measurementMatrix = H;\n\n    // Process noise covariance (x, y, vx, vy)\n    cv::Mat1f Q(4, 4);\n    Q << 1e-5,  0.0,  0.0,  0.0,\n          0.0, 1e-5,  0.0,  0.0,\n          0.0,  0.0, 1e-5,  0.0,\n          0.0,  0.0,  0.0, 1e-5;\n    kalman.processNoiseCov = Q;\n\n    // Measurement noise covariance (x, y)\n    cv::Mat1f R(2, 2);\n    R << 1e-1,  0.0,\n          0.0, 1e-1;\n    kalman.measurementNoiseCov = R;\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // HSV image\n        cv::Mat hsv;\n        cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV_FULL);\n\n        // Binalize\n        cv::Mat binalized;\n        cv::Scalar lower(minH, minS, minV);\n        cv::Scalar upper(maxH, maxS, maxV);\n        cv::inRange(hsv, lower, upper, binalized);\n\n        // Show result\n        cv::imshow(\"binalized\", binalized);\n\n        // De-noising\n        cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));\n        cv::morphologyEx(binalized, binalized, cv::MORPH_CLOSE, kernel);\n        //cv::imshow(\"morphologyEx\", binalized);\n\n        // Detect contours\n        std::vector< std::vector<cv::Point> > contours;\n        cv::findContours(binalized.clone(), contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);\n\n        // Find the largest contour\n        int contour_index = -1;\n        double max_area = 0.0;\n 
       for (size_t i = 0; i < contours.size(); i++) {\n            double area = fabs(cv::contourArea(contours[i]));\n            if (area > max_area) {\n                contour_index = i;\n                max_area = area;\n            }\n        }\n\n        // Object detected\n        if (contour_index >= 0) {\n            // Moments\n            cv::Moments moments = cv::moments(contours[contour_index], true);\n            double marker_y = (int)(moments.m01 / moments.m00);\n            double marker_x = (int)(moments.m10 / moments.m00);\n\n            // Measurements\n            cv::Mat measurement = (cv::Mat1f(2, 1) << marker_x, marker_y);\n\n            // Correction\n            cv::Mat estimated = kalman.correct(measurement);\n\n            // Show result\n            cv::Rect rect = cv::boundingRect(contours[contour_index]);\n            cv::rectangle(image, rect, cv::Scalar(0, 255, 0));\n        }\n\n        // Prediction\n        cv::Mat1f prediction = kalman.predict();\n        int radius = 1e+3 * kalman.errorCovPre.at<float>(0, 0);   // Radius grows with the predicted position variance\n\n        // Show predicted position\n        cv::circle(image, cv::Point(prediction(0, 0), prediction(1, 0)), radius, cv::Scalar(0, 255, 0), 2);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // Save thresholds\n    fs.open(filename, cv::FileStorage::WRITE);\n    if (fs.isOpened()) {\n        cv::write(fs, \"H_MAX\", maxH);\n        cv::write(fs, \"H_MIN\", minH);\n        cv::write(fs, \"S_MAX\", maxS);\n        cv::write(fs, \"S_MIN\", minS);\n        cv::write(fs, \"V_MAX\", maxV);\n        cv::write(fs, \"V_MIN\", minV);\n        fs.release();\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_led_animation.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Instructions\n    std::cout << \"  Q - BLINK_GREEN_RED             \" << std::endl;\n    std::cout << \"  A - BLINK_GREEN                 \" << std::endl;\n    std::cout << \"  Z - BLINK_RED                   \" << std::endl;\n    std::cout << \"  W - BLINK_ORANGE                \" << std::endl;\n    std::cout << \"  S - SNAKE_GREEN_RED             \" << std::endl;\n    std::cout << \"  X - FIRE                        \" << std::endl;\n    std::cout << \"  E - STANDARD                    \" << std::endl;\n    std::cout << \"  D - RED                         \" << std::endl;\n    std::cout << \"  C - GREEN                       \" << std::endl;\n    std::cout << \"  R - RED_SNAKE                   \" << std::endl;\n    std::cout << \"  F - BLANK                       \" << std::endl;\n    std::cout << \"  V - RIGHT_MISSILE               \" << std::endl;\n    std::cout << \"  T - LEFT_MISSILE                \" << std::endl;\n    std::cout << \"  G - DOUBLE_MISSILE              \" << std::endl;\n    std::cout << \"  B - FRONT_LEFT_GREEN_OTHERS_RED \" << std::endl;\n    std::cout << \"  Y - FRONT_RIGHT_GREEN_OTHERS_RED\" << std::endl;\n    std::cout << \"  H - REAR_RIGHT_GREEN_OTHERS_RED \" << std::endl;\n    std::cout << \"  N - REAR_LEFT_GREEN_OTHERS_RED  \" << std::endl;\n    std::cout << \"  U - LEFT_GREEN_RIGHT_RED        \" << std::endl;\n    std::cout << \"  J - LEFT_RED_RIGHT_GREEN        \" << std::endl;\n    std::cout << \"  M - BLINK_STANDARD              \" << std::endl;\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // LED animations\n        if (key == 'q') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_GREEN_RED);\n        if (key == 'a') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_GREEN);\n        if (key == 'z') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_RED);\n        if (key == 'w') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_ORANGE);\n        if (key == 's') ardrone.setLED(ARDRONE_LED_ANIM_SNAKE_GREEN_RED);\n        if (key == 'x') ardrone.setLED(ARDRONE_LED_ANIM_FIRE);\n        if (key == 'e') ardrone.setLED(ARDRONE_LED_ANIM_STANDARD);\n        if (key == 'd') ardrone.setLED(ARDRONE_LED_ANIM_RED);\n        if (key == 'c') ardrone.setLED(ARDRONE_LED_ANIM_GREEN);\n        if (key == 'r') ardrone.setLED(ARDRONE_LED_ANIM_RED_SNAKE);\n        if (key == 'f') ardrone.setLED(ARDRONE_LED_ANIM_BLANK);\n        if (key == 'v') ardrone.setLED(ARDRONE_LED_ANIM_RIGHT_MISSILE);\n        if (key == 't') ardrone.setLED(ARDRONE_LED_ANIM_LEFT_MISSILE);\n        if (key == 'g') ardrone.setLED(ARDRONE_LED_ANIM_DOUBLE_MISSILE);\n        if (key == 'b') ardrone.setLED(ARDRONE_LED_ANIM_FRONT_LEFT_GREEN_OTHERS_RED);\n        if (key == 'y') ardrone.setLED(ARDRONE_LED_ANIM_FRONT_RIGHT_GREEN_OTHERS_RED);\n        if (key == 'h') ardrone.setLED(ARDRONE_LED_ANIM_REAR_RIGHT_GREEN_OTHERS_RED);\n  
      if (key == 'n') ardrone.setLED(ARDRONE_LED_ANIM_REAR_LEFT_GREEN_OTHERS_RED);\n        if (key == 'u') ardrone.setLED(ARDRONE_LED_ANIM_LEFT_GREEN_RIGHT_RED);\n        if (key == 'j') ardrone.setLED(ARDRONE_LED_ANIM_LEFT_RED_RIGHT_GREEN);\n        if (key == 'm') ardrone.setLED(ARDRONE_LED_ANIM_BLINK_STANDARD);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_marker_based_ar.cpp",
    "content": "﻿// C++ STL\n#include <iostream>\n#include <fstream>\n\n// OpenCV\n#include <opencv2/opencv.hpp>\n\n// OpenGL\n#include <GL/glut.h>\n\n// AR.Drone\n#include \"ardrone/ardrone.h\"\n\n// Marker detector\n#include \".\\3rdparty\\packtpub\\MarkerDetector.hpp\"\n\n// Parameter for calibration pattern\n#define PAT_ROWS   (7)                  // Rows of pattern\n#define PAT_COLS   (10)                 // Columns of pattern\n#define CHESS_SIZE (24.0)               // Size of a pattern [mm]\n\n// Global variables\nARDrone ardrone;\ncv::Mat mapx, mapy;\nCameraCalibration calibration;\n\n// --------------------------------------------------------------------------\n// buildProjectionMatrix(Camera matrix, Screen width, Screen height)\n// Description  : Calculate projection matrix from camera and screen paremeters.\n// Return value : Projection matrix\n// --------------------------------------------------------------------------\nMatrix44 buildProjectionMatrix(Matrix33 cameraMatrix, int screen_width, int screen_height)\n{\n\tfloat d_near = 0.01;  // Near clipping distance\n\tfloat d_far = 100;    // Far clipping distance\n\n\t// Camera parameters\n\tfloat f_x = cameraMatrix.data[0]; // Focal length in x axis\n\tfloat f_y = cameraMatrix.data[4]; // Focal length in y axis (usually the same?)\n\tfloat c_x = cameraMatrix.data[2]; // Camera primary point x\n\tfloat c_y = cameraMatrix.data[5]; // Camera primary point y\n\n\tMatrix44 projectionMatrix;\n\tprojectionMatrix.data[0] = -2.0 * f_x / screen_width;\n\tprojectionMatrix.data[1] = 0.0;\n\tprojectionMatrix.data[2] = 0.0;\n\tprojectionMatrix.data[3] = 0.0;\n\n\tprojectionMatrix.data[4] = 0.0;\n\tprojectionMatrix.data[5] = 2.0 * f_y / screen_height;\n\tprojectionMatrix.data[6] = 0.0;\n\tprojectionMatrix.data[7] = 0.0;\n\n\tprojectionMatrix.data[8] = 2.0 * c_x / screen_width - 1.0;\n\tprojectionMatrix.data[9] = 2.0 * c_y / screen_height - 1.0;\n\tprojectionMatrix.data[10] = -(d_far + d_near) / (d_far - d_near);\n\tprojectionMatrix.data[11] = -1.0;\n\n\tprojectionMatrix.data[12] = 0.0;\n\tprojectionMatrix.data[13] = 0.0;\n\tprojectionMatrix.data[14] = -2.0 * d_far * d_near / (d_far - d_near);\n\tprojectionMatrix.data[15] = 0.0;\n\n\treturn projectionMatrix;\n}\n\n// --------------------------------------------------------------------------\n// idle()\n// Description  : Idle function.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid idle(void)\n{\n\t// Redisplay\n\tglutPostRedisplay();\n}\n\n// --------------------------------------------------------------------------\n// display()\n// Description  : Displaying function.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid display(void)\n{\n\t// Clear the buffers\n\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n\n\t// Get an image\n\tcv::Mat image_raw = ardrone.getImage();\n\tcv::Mat image;\n\tcv::remap(image_raw, image, mapx, mapy, cv::INTER_LINEAR);\n\n\t// Show the image\n\tcv::Mat rgb;\n\tcv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);\n\tcv::flip(rgb, rgb, 0);\n\tglDepthMask(GL_FALSE);\n\tglDrawPixels(rgb.cols, rgb.rows, GL_RGB, GL_UNSIGNED_BYTE, rgb.data);\n\n\t// Convert to BGRA\n\tcv::Mat bgra;\n\tcv::cvtColor(image, bgra, cv::COLOR_BGR2BGRA);\n\n\t// Prepare for marker detection\n\tBGRAVideoFrame frame;\n\tframe.width = bgra.cols;\n\tframe.height = bgra.rows;\n\tframe.data = bgra.data;\n\tframe.stride = bgra.step;\n\n\t// Detect marker(s)\n\tMarkerDetector 
detector(calibration);\n\tdetector.processFrame(frame);\n\tstd::vector<Transformation> transformations = detector.getTransformations();\n\n\t// Calculate projection matrix\n\tMatrix44 projectionMatrix = buildProjectionMatrix(calibration.getIntrinsic(), frame.width, frame.height);\n\n\t// Apply the projection matrix\n\tglMatrixMode(GL_PROJECTION);\n\tglLoadMatrixf(projectionMatrix.data);\n\n\t// Change to model view matrix mode\n\tglMatrixMode(GL_MODELVIEW);\n\tglLoadIdentity();\n\n\t// Enable depth mask\n\tglDepthMask(GL_TRUE);\n\n\t// Enable vertex array\n\tglEnableClientState(GL_VERTEX_ARRAY);\n\tglEnableClientState(GL_COLOR_ARRAY);\n\n\t// Push current model view matrix\n\tglPushMatrix();\n\n\t// Set line width\n\tglLineWidth(3.0f);\n\n\t// Vertex arrays\n\tfloat lineX[] = { 0, 0, 0, 1, 0, 0 };\n\tfloat lineY[] = { 0, 0, 0, 0, 1, 0 };\n\tfloat lineZ[] = { 0, 0, 0, 0, 0, 1 };\n\n\t// 2D plane\n\tconst GLfloat squareVertices[] = {-0.5f, -0.5f,\n\t\t\t\t\t\t\t\t\t   0.5f, -0.5f,\n\t\t\t\t\t\t\t\t\t  -0.5f,  0.5f,\n\t\t\t\t\t\t\t\t\t   0.5f,  0.5f};\n\n\t// 2D plane color (RGBA)\n\tconst GLubyte squareColors[] = {255, 255,   0, 255,\n\t\t\t\t\t\t\t\t      0, 255, 255, 255,\n\t\t\t\t\t\t\t\t\t  0,   0,   0,   0,\n\t\t\t\t\t\t\t\t\t255,   0, 255, 255};\n\n\t// Draw AR\n\tfor (size_t i = 0; i < transformations.size(); i++) {\n\t\t// Get transformation\n\t\tconst Transformation &transformation = transformations[i];\n\t\tMatrix44 glMatrix = transformation.getMat44();\n\n\t\t// Load it\n\t\tglLoadMatrixf(reinterpret_cast<const GLfloat*>(&glMatrix.data[0]));\n\n\t\t// Draw 2D plane\n\t\tglEnableClientState(GL_COLOR_ARRAY);\n\t\tglVertexPointer(2, GL_FLOAT, 0, squareVertices);\n\t\tglColorPointer(4, GL_UNSIGNED_BYTE, 0, squareColors);\n\t\tglDrawArrays(GL_TRIANGLE_STRIP, 0, 4);\n\t\tglDisableClientState(GL_COLOR_ARRAY);\n\n\t\t// Scale of coordinate axes\n\t\tfloat scale = 0.5;\n\t\tglScalef(scale, scale, scale);\n\n\t\t// Move it a little\n\t\tglTranslatef(0, 0, 0.1f);\n\n\t\t// X axis\n\t\tglColor4f(1.0f, 0.0f, 0.0f, 1.0f);\n\t\tglVertexPointer(3, GL_FLOAT, 0, lineX);\n\t\tglDrawArrays(GL_LINES, 0, 2);\n\n\t\t// Y axis\n\t\tglColor4f(0.0f, 1.0f, 0.0f, 1.0f);\n\t\tglVertexPointer(3, GL_FLOAT, 0, lineY);\n\t\tglDrawArrays(GL_LINES, 0, 2);\n\n\t\t// Z axis\n\t\tglColor4f(0.0f, 0.0f, 1.0f, 1.0f);\n\t\tglVertexPointer(3, GL_FLOAT, 0, lineZ);\n\t\tglDrawArrays(GL_LINES, 0, 2);\n\t}\n\n\t// Disable vertex array\n\tglDisableClientState(GL_VERTEX_ARRAY);\n\tglDisableClientState(GL_COLOR_ARRAY);\n\n\t// Pop the model view matrix\n\tglPopMatrix();\n\n\t// Swap the buffer\n\tglutSwapBuffers();\n}\n\n// --------------------------------------------------------------------------\n// key(Key pressed, X position of cursor, Y position of cursor)\n// Description  : Key input function.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid key(unsigned char key, int x, int y) {\n\tswitch (key) {\n\tcase 0x1b:\n\t\texit(1);\n\t\tbreak;\n\tdefault:\n\t\tbreak;\n\t}\n}\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // Initialize\n\tif (!ardrone.open()) {\n\t\tstd::cout << \"Failed to initialize.\" << std::endl;\n\t\treturn -1;\n\t}\n\n    // Images\n\tcv::Mat frame = 
ardrone.getImage();\n\n\t// Open XML file\n    std::string filename(\"camera.xml\");\n    std::fstream file(filename.c_str(), std::ios::in);\n\n    // Not found\n    if (!file.is_open()) {\n        // Image buffer\n\t\tstd::vector<cv::Mat> images;\n\t\tstd::cout << \"Press Space key to capture an image\" << std::endl;\n\t\tstd::cout << \"Press Esc to exit\" << std::endl;\n\n\t\t// Main loop\n\t\twhile (1) {\n\t\t\t// Key input\n\t\t\tint key = cv::waitKey(1);\n\t\t\tif (key == 0x1b) break;\n\n\t\t\t// Get an image\n\t\t\tframe = ardrone.getImage();\n\n\t\t\t// Convert to grayscale\n\t\t\tcv::Mat gray;\n\t\t\tcv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);\n\n\t\t\t// Detect a chessboard\n\t\t\tcv::Size size(PAT_COLS, PAT_ROWS);\n\t\t\tstd::vector<cv::Point2f> corners;\n\t\t\tbool found = cv::findChessboardCorners(gray, size, corners, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE | cv::CALIB_CB_FAST_CHECK);\n\n\t\t\t// Chessboard detected\n\t\t\tif (found) {\n\t\t\t\t// Draw it\n\t\t\t\tcv::drawChessboardCorners(frame, size, corners, found);\n\n\t\t\t\t// Space key was pressed\n\t\t\t\tif (key == ' ') {\n\t\t\t\t\t// Add to buffer\n\t\t\t\t\timages.push_back(gray);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Show the image\n\t\t\tstd::ostringstream stream;\n\t\t\tstream << \"Captured \" << images.size() << \" image(s).\";\n\t\t\tcv::putText(frame, stream.str(), cv::Point(10, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1, cv::LINE_AA);\n\t\t\tcv::imshow(\"Camera Calibration\", frame);\n\t\t}\n\n\t\t// We have enough samples\n\t\tif (images.size() > 4) {\n\t\t\tcv::Size size(PAT_COLS, PAT_ROWS);\n\t\t\tstd::vector<std::vector<cv::Point2f>> corners2D;\n\t\t\tstd::vector<std::vector<cv::Point3f>> corners3D;\n\n\t\t\tfor (size_t i = 0; i < images.size(); i++) {\n\t\t\t\t// Detect a chessboard\n\t\t\t\tstd::vector<cv::Point2f> tmp_corners2D;\n\t\t\t\tbool found = cv::findChessboardCorners(images[i], size, tmp_corners2D);\n\n\t\t\t\t// Chessboard detected\n\t\t\t\tif (found) {\n\t\t\t\t\t// Convert the corners to sub-pixel\n\t\t\t\t\tcv::cornerSubPix(images[i], tmp_corners2D, cv::Size(11, 11), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 30, 0.1));\n\t\t\t\t\tcorners2D.push_back(tmp_corners2D);\n\n\t\t\t\t\t// Set the 3D position of patterns\n\t\t\t\t\tconst float squareSize = CHESS_SIZE;\n\t\t\t\t\tstd::vector<cv::Point3f> tmp_corners3D;\n\t\t\t\t\tfor (int j = 0; j < size.height; j++) {\n\t\t\t\t\t\tfor (int k = 0; k < size.width; k++) {\n\t\t\t\t\t\t\ttmp_corners3D.push_back(cv::Point3f((float)(k*squareSize), (float)(j*squareSize), 0.0));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcorners3D.push_back(tmp_corners3D);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Estimate camera parameters\n\t\t\tcv::Mat cameraMatrix, distCoeffs;\n\t\t\tstd::vector<cv::Mat> rvec, tvec;\n\t\t\tcv::calibrateCamera(corners3D, corners2D, images[0].size(), cameraMatrix, distCoeffs, rvec, tvec);\n\t\t\tstd::cout << cameraMatrix << std::endl;\n\t\t\tstd::cout << distCoeffs << std::endl;\n\n\t\t\t// Save them\n\t\t\tcv::FileStorage fs(filename, cv::FileStorage::WRITE);\n\t\t\tfs << \"intrinsic\" << cameraMatrix;\n\t\t\tfs << \"distortion\" << distCoeffs;\n\t\t}\n\n\t\t// Destroy windows\n\t\tcv::destroyAllWindows();\n\t}\n\n\t// Open XML file\n\tcv::FileStorage rfs(filename, cv::FileStorage::READ);\n\tif (!rfs.isOpened()) {\n\t\tstd::cout << \"Failed to open the XML file\" << std::endl;\n\t\treturn -1;\n\t}\n\n\t// Load camera parameters\n\tcv::Mat cameraMatrix, 
distCoeffs;\n\trfs[\"intrinsic\"] >> cameraMatrix;\n\trfs[\"distortion\"] >> distCoeffs;\n\n\t// Create undistort map\n\tcv::initUndistortRectifyMap(cameraMatrix, distCoeffs, cv::Mat(), cameraMatrix, frame.size(), CV_32FC1, mapx, mapy);\n\n\t// Set camera parameters\n\tfloat fx = cameraMatrix.at<double>(0, 0);\n\tfloat fy = cameraMatrix.at<double>(1, 1);\n\tfloat cx = cameraMatrix.at<double>(0, 2);\n\tfloat cy = cameraMatrix.at<double>(1, 2);\n\t//calibration = CameraCalibration(fx, fy, cx, cy);\n\t// Use the image centre as the principal point rather than the calibrated cx/cy\n    calibration = CameraCalibration(fx, fy, frame.cols / 2, frame.rows / 2);\n\n\t// Initialize GLUT\n\tglutInit(&argc, argv);\n\tglutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);\n\tglutInitWindowSize(frame.cols, frame.rows);\n\tglutCreateWindow(\"Mastering OpenCV with Practical Computer Vision Project\");\n\tglutDisplayFunc(display);\n\tglutKeyboardFunc(key);\n\tglutIdleFunc(idle);\n\n\t// Clear scene\n\tglClearColor(0.0, 0.0, 1.0, 1.0);\n\tglEnable(GL_DEPTH_TEST);\n\n\t// Start main loop\n\tglutMainLoop();\n\n\treturn 0;\n}"
  },
  {
    "path": "samples/sample_minimal.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone(\"192.168.1.1\");\n\n    // Main loop\n    while (1) {\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n\n        // Press Esc to exit\n        if (cv::waitKey(1) == 0x1b) break;\n    }\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_navdata.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image= ardrone.getImage();\n\n        // Orientation\n        double roll  = ardrone.getRoll();\n        double pitch = ardrone.getPitch();\n        double yaw   = ardrone.getYaw();\n        std::cout << \"ardrone.roll  = \" << roll  * RAD_TO_DEG << \" [deg]\" << std::endl;\n        std::cout << \"ardrone.pitch = \" << pitch * RAD_TO_DEG << \" [deg]\" << std::endl;\n        std::cout << \"ardrone.yaw   = \" << yaw   * RAD_TO_DEG << \" [deg]\" << std::endl;\n\n        // Altitude\n        double altitude = ardrone.getAltitude();\n        std::cout << \"ardrone.altitude = \" << altitude << \" [m]\" << std::endl;\n\n        // Velocity\n        double v_x, v_y, v_z;\n        double velocity = ardrone.getVelocity(&v_x, &v_y, &v_z);\n        std::cout << \"ardrone.vx = \" << v_x << \" [m/s]\" << std::endl;\n        std::cout << \"ardrone.vy = \" << v_y << \" [m/s]\" << std::endl;\n        std::cout << \"ardrone.vz = \" << v_z << \" [m/s]\" << std::endl;\n\n        // Battery\n        int battery = ardrone.getBatteryPercentage();\n        std::cout << \"ardrone.battery = \" << battery << \" [%]\" << std::endl;\n\n        // Take off / Landing\n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        }\n\n        // Move\n        double vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;\n        if (key == 'i' || key == CV_VK_UP)    vx =  1.0;\n        if (key == 'k' || key == CV_VK_DOWN)  vx = -1.0;\n        if (key == 'u' || key == CV_VK_LEFT)  vr =  1.0;\n        if (key == 'o' || key == CV_VK_RIGHT) vr = -1.0;\n        if (key == 'j') vy =  1.0;\n        if (key == 'l') vy = -1.0;\n        if (key == 'q') vz =  1.0;\n        if (key == 'a') vz = -1.0;\n        ardrone.move3D(vx, vy, vz, vr);\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode%4);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_optical_flow.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Get an image\n    cv::Mat prev_image = ardrone.getImage();\n\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Convert the camera image to grayscale\n        cv::Mat prev_gray, new_gray;\n        cv::cvtColor(image, new_gray, cv::COLOR_BGR2GRAY);\n        cv::cvtColor(prev_image, prev_gray, cv::COLOR_BGR2GRAY);\n\n        // Detect corners\n        int max_corners = 50;\n        std::vector<cv::Point2f> prev_corners;\n        std::vector<cv::Point2f> new_corners;\n        cv::goodFeaturesToTrack(prev_gray, prev_corners, max_corners, 0.1, 5.0);\n        cv::goodFeaturesToTrack(new_gray, new_corners, max_corners, 0.1, 5.0);\n\n        // Calclate optical flow\n        std::vector<unsigned char> status;\n        std::vector<float> errors;\n        cv::calcOpticalFlowPyrLK(prev_gray, new_gray, prev_corners, new_corners, status, errors);\n\n        // Save the last frame\n        image.copyTo(prev_image);\n\n        // Draw optical flow\n        for (size_t i = 0; i < status.size(); i++) {\n            cv::Point p0(ceil(prev_corners[i].x), ceil(prev_corners[i].y));\n            cv::Point p1(ceil(new_corners[i].x), ceil(new_corners[i].y));\n            cv::line(image, p0, p1, cv::Scalar(0, 255, 0), 2);\n        }\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode % 4);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_tracking.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Battery\n    std::cout << \"Battery = \" << ardrone.getBatteryPercentage() << \"%\" << std::endl;\n\n    // Instructions\n    std::cout << \"***************************************\" << std::endl;\n    std::cout << \"*       CV Drone sample program       *\" << std::endl;\n    std::cout << \"*           - How to Play -           *\" << std::endl;\n    std::cout << \"***************************************\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"* - Controls -                        *\" << std::endl;\n    std::cout << \"*    'Space' -- Takeoff/Landing       *\" << std::endl;\n    std::cout << \"*    'Up'    -- Move forward          *\" << std::endl;\n    std::cout << \"*    'Down'  -- Move backward         *\" << std::endl;\n    std::cout << \"*    'Left'  -- Turn left             *\" << std::endl;\n    std::cout << \"*    'Right' -- Turn right            *\" << std::endl;\n    std::cout << \"*    'Q'     -- Move upward           *\" << std::endl;\n    std::cout << \"*    'A'     -- Move downward         *\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"* - Others -                          *\" << std::endl;\n    std::cout << \"*    'T'     -- Track marker          *\" << std::endl;\n    std::cout << \"*    'C'     -- Change camera         *\" << std::endl;\n    std::cout << \"*    'Esc'   -- Exit                  *\" << std::endl;\n    std::cout << \"*                                     *\" << std::endl;\n    std::cout << \"***************************************\" << std::endl;\n\n    // Thresholds\n    int minH = 0, maxH = 255;\n    int minS = 0, maxS = 255;\n    int minV = 0, maxV = 255;\n\n    // XML save data\n    std::string filename(\"thresholds.xml\");\n    cv::FileStorage fs(filename, cv::FileStorage::READ);\n\n    // If there is a save file then read it\n    if (fs.isOpened()) {\n        maxH = fs[\"H_MAX\"];\n        minH = fs[\"H_MIN\"];\n        maxS = fs[\"S_MAX\"];\n        minS = fs[\"S_MIN\"];\n        maxV = fs[\"V_MAX\"];\n        minV = fs[\"V_MIN\"];\n        fs.release();\n    }\n\n    // Create a window\n    cv::namedWindow(\"binalized\");\n    cv::createTrackbar(\"H max\", \"binalized\", &maxH, 255);\n    cv::createTrackbar(\"H min\", \"binalized\", &minH, 255);\n    cv::createTrackbar(\"S max\", \"binalized\", &maxS, 255);\n    cv::createTrackbar(\"S min\", \"binalized\", &minS, 255);\n    cv::createTrackbar(\"V max\", \"binalized\", &maxV, 255);\n    cv::createTrackbar(\"V min\", \"binalized\", &minV, 255);\n    cv::resizeWindow(\"binalized\", 0, 0);\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Take off / Landing \n        if (key == ' ') {\n            if (ardrone.onGround()) ardrone.takeoff();\n            else                    ardrone.landing();\n        
}\n\n        // Move\n        double vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;\n        if (key == 'i' || key == CV_VK_UP)    vx =  1.0;\n        if (key == 'k' || key == CV_VK_DOWN)  vx = -1.0;\n        if (key == 'u' || key == CV_VK_LEFT)  vr =  1.0;\n        if (key == 'o' || key == CV_VK_RIGHT) vr = -1.0;\n        if (key == 'j') vy =  1.0;\n        if (key == 'l') vy = -1.0;\n        if (key == 'q') vz =  1.0;\n        if (key == 'a') vz = -1.0;\n\n        // Change camera\n        static int mode = 0;\n        if (key == 'c') ardrone.setCamera(++mode % 4);\n\n        // Switch tracking ON/OFF\n        static int track = 0;\n        if (key == 't') track = !track;\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // HSV image\n        cv::Mat hsv;\n        cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV_FULL);\n\n        // Binalize\n        cv::Mat binalized;\n        cv::Scalar lower(minH, minS, minV);\n        cv::Scalar upper(maxH, maxS, maxV);\n        cv::inRange(hsv, lower, upper, binalized);\n\n        // Show result\n        cv::imshow(\"binalized\", binalized);\n\n        // De-noising\n        cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));\n        cv::morphologyEx(binalized, binalized, cv::MORPH_CLOSE, kernel);\n        //cv::imshow(\"morphologyEx\", binalized);\n\n        // Detect contours\n        std::vector< std::vector<cv::Point> > contours;\n        cv::findContours(binalized.clone(), contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);\n\n        // Find largest contour\n        int contour_index = -1;\n        double max_area = 0.0;\n        for (size_t i = 0; i < contours.size(); i++) {\n            double area = fabs(cv::contourArea(contours[i]));\n            if (area > max_area) {\n                contour_index = i;\n                max_area = area;\n            }\n        }\n\n        // Object detected\n        if (contour_index >= 0) {\n            // Moments\n            cv::Moments moments = cv::moments(contours[contour_index], true);\n            double marker_y = (int)(moments.m01 / moments.m00);\n            double marker_x = (int)(moments.m10 / moments.m00);\n\n            // Show result\n            cv::Rect rect = cv::boundingRect(contours[contour_index]);\n            cv::rectangle(image, rect, cv::Scalar(0, 255, 0));\n\n            // Tracking\n            if (track) {\n                const double kp = 0.005;\n                vx = 0.1;\n                vy = 0.0;\n                vz = kp * (binalized.rows / 2 - marker_y);\n                vr = kp * (binalized.cols / 2 - marker_x);\n            }\n        }\n\n        // Display the image\n        cv::putText(image, (track) ? \"track on\" : \"track off\", cv::Point(10, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, (track) ? cv::Scalar(0, 0, 255) : cv::Scalar(0, 255, 0), 1, cv::LINE_AA);\n        cv::imshow(\"camera\", image);\n        ardrone.move3D(vx, vy, vz, vr);\n    }\n\n    // Save thresholds\n    fs.open(filename, cv::FileStorage::WRITE);\n    if (fs.isOpened()) {\n        cv::write(fs, \"H_MAX\", maxH);\n        cv::write(fs, \"H_MIN\", minH);\n        cv::write(fs, \"S_MAX\", maxS);\n        cv::write(fs, \"S_MIN\", minS);\n        cv::write(fs, \"V_MAX\", maxV);\n        cv::write(fs, \"V_MIN\", minV);\n        fs.release();\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_video_record.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Recording flag\n    bool rec = false;\n    std::cout << \"Press 'R' to start/stop recording.\" << std::endl;\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(1);\n        if (key == 0x1b) break;\n\n        // Video recording start / stop\n        if (key == 'r') {\n            rec = !rec;\n            ardrone.setVideoRecord(rec);\n        }\n\n        // Get an image\n        cv::Mat image = ardrone.getImage();\n\n        // Show recording state\n        if (rec) cv::putText(image, \"REC\", cv::Point(10, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);\n\n        // Display the image\n        cv::imshow(\"camera\", image);\n    }\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "samples/sample_video_writer.cpp",
    "content": "#include \"ardrone/ardrone.h\"\n\n// For std::localtime();\n#include <ctime>\n\n// --------------------------------------------------------------------------\n// main(Number of arguments, Argument values)\n// Description  : This is the entry point of the program.\n// Return value : SUCCESS:0  ERROR:-1\n// --------------------------------------------------------------------------\nint main(int argc, char *argv[])\n{\n    // AR.Drone class\n    ARDrone ardrone;\n\n    // Initialize\n    if (!ardrone.open()) {\n        std::cout << \"Failed to initialize.\" << std::endl;\n        return -1;\n    }\n\n    // Image of AR.Drone's camera\n    cv::Mat image = ardrone.getImage();\n    \n    // Video name\n    std::time_t t = std::time(NULL);\n    std::tm *local = std::localtime(&t);\n    std::ostringstream stream;\n    stream << 1900 + local->tm_year << \"-\" << 1 + local->tm_mon << \"-\" << local->tm_mday << \"-\" << local->tm_hour << \"-\" << local->tm_min << \"-\" << local->tm_sec << \".avi\";\n\n    // Create a video writer\n    cv::VideoWriter writer(stream.str(), cv::VideoWriter::fourcc('D', 'I', 'B', ' '), 30, cv::Size(image.cols, image.rows));\n\n    // Main loop\n    while (1) {\n        // Key input\n        int key = cv::waitKey(33);\n        if (key == 0x1b) break;\n\n        // Get an image\n        image = ardrone.getImage();\n\n        // Write a frame\n        writer << image;\n\n        // Display the image\n        imshow(\"camera\", image);\n    }\n\n    // Output the video\n    writer.release();\n\n    // See you\n    ardrone.close();\n\n    return 0;\n}"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/inttypes.h",
    "content": "// ISO C9x  compliant inttypes.h for Microsoft Visual Studio\n// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 \n// \n//  Copyright (c) 2006 Alexander Chemeris\n// \n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n// \n//   1. Redistributions of source code must retain the above copyright notice,\n//      this list of conditions and the following disclaimer.\n// \n//   2. Redistributions in binary form must reproduce the above copyright\n//      notice, this list of conditions and the following disclaimer in the\n//      documentation and/or other materials provided with the distribution.\n// \n//   3. The name of the author may be used to endorse or promote products\n//      derived from this software without specific prior written permission.\n// \n// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED\n// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\n// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n// \n///////////////////////////////////////////////////////////////////////////////\n\n#ifndef _MSC_VER // [\n#error \"Use this header only with Microsoft Visual C++ compilers!\"\n#endif // _MSC_VER ]\n\n#ifndef _MSC_INTTYPES_H_ // [\n#define _MSC_INTTYPES_H_\n\n#if _MSC_VER > 1000\n#pragma once\n#endif\n\n#include \"stdint.h\"\n\n// 7.8 Format conversion of integer types\n\ntypedef struct {\n   intmax_t quot;\n   intmax_t rem;\n} imaxdiv_t;\n\n// 7.8.1 Macros for format specifiers\n\n#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [   See footnote 185 at page 198\n\n// The fprintf macros for signed integers are:\n#define PRId8       \"d\"\n#define PRIi8       \"i\"\n#define PRIdLEAST8  \"d\"\n#define PRIiLEAST8  \"i\"\n#define PRIdFAST8   \"d\"\n#define PRIiFAST8   \"i\"\n\n#define PRId16       \"hd\"\n#define PRIi16       \"hi\"\n#define PRIdLEAST16  \"hd\"\n#define PRIiLEAST16  \"hi\"\n#define PRIdFAST16   \"hd\"\n#define PRIiFAST16   \"hi\"\n\n#define PRId32       \"I32d\"\n#define PRIi32       \"I32i\"\n#define PRIdLEAST32  \"I32d\"\n#define PRIiLEAST32  \"I32i\"\n#define PRIdFAST32   \"I32d\"\n#define PRIiFAST32   \"I32i\"\n\n#define PRId64       \"I64d\"\n#define PRIi64       \"I64i\"\n#define PRIdLEAST64  \"I64d\"\n#define PRIiLEAST64  \"I64i\"\n#define PRIdFAST64   \"I64d\"\n#define PRIiFAST64   \"I64i\"\n\n#define PRIdMAX     \"I64d\"\n#define PRIiMAX     \"I64i\"\n\n#define PRIdPTR     \"Id\"\n#define PRIiPTR     \"Ii\"\n\n// The fprintf macros for unsigned integers are:\n#define PRIo8       \"o\"\n#define PRIu8       \"u\"\n#define PRIx8       \"x\"\n#define PRIX8       \"X\"\n#define PRIoLEAST8  \"o\"\n#define PRIuLEAST8  \"u\"\n#define PRIxLEAST8  \"x\"\n#define PRIXLEAST8  \"X\"\n#define PRIoFAST8   \"o\"\n#define PRIuFAST8   \"u\"\n#define PRIxFAST8   \"x\"\n#define PRIXFAST8   \"X\"\n\n#define PRIo16       \"ho\"\n#define PRIu16       
\"hu\"\n#define PRIx16       \"hx\"\n#define PRIX16       \"hX\"\n#define PRIoLEAST16  \"ho\"\n#define PRIuLEAST16  \"hu\"\n#define PRIxLEAST16  \"hx\"\n#define PRIXLEAST16  \"hX\"\n#define PRIoFAST16   \"ho\"\n#define PRIuFAST16   \"hu\"\n#define PRIxFAST16   \"hx\"\n#define PRIXFAST16   \"hX\"\n\n#define PRIo32       \"I32o\"\n#define PRIu32       \"I32u\"\n#define PRIx32       \"I32x\"\n#define PRIX32       \"I32X\"\n#define PRIoLEAST32  \"I32o\"\n#define PRIuLEAST32  \"I32u\"\n#define PRIxLEAST32  \"I32x\"\n#define PRIXLEAST32  \"I32X\"\n#define PRIoFAST32   \"I32o\"\n#define PRIuFAST32   \"I32u\"\n#define PRIxFAST32   \"I32x\"\n#define PRIXFAST32   \"I32X\"\n\n#define PRIo64       \"I64o\"\n#define PRIu64       \"I64u\"\n#define PRIx64       \"I64x\"\n#define PRIX64       \"I64X\"\n#define PRIoLEAST64  \"I64o\"\n#define PRIuLEAST64  \"I64u\"\n#define PRIxLEAST64  \"I64x\"\n#define PRIXLEAST64  \"I64X\"\n#define PRIoFAST64   \"I64o\"\n#define PRIuFAST64   \"I64u\"\n#define PRIxFAST64   \"I64x\"\n#define PRIXFAST64   \"I64X\"\n\n#define PRIoMAX     \"I64o\"\n#define PRIuMAX     \"I64u\"\n#define PRIxMAX     \"I64x\"\n#define PRIXMAX     \"I64X\"\n\n#define PRIoPTR     \"Io\"\n#define PRIuPTR     \"Iu\"\n#define PRIxPTR     \"Ix\"\n#define PRIXPTR     \"IX\"\n\n// The fscanf macros for signed integers are:\n#define SCNd8       \"d\"\n#define SCNi8       \"i\"\n#define SCNdLEAST8  \"d\"\n#define SCNiLEAST8  \"i\"\n#define SCNdFAST8   \"d\"\n#define SCNiFAST8   \"i\"\n\n#define SCNd16       \"hd\"\n#define SCNi16       \"hi\"\n#define SCNdLEAST16  \"hd\"\n#define SCNiLEAST16  \"hi\"\n#define SCNdFAST16   \"hd\"\n#define SCNiFAST16   \"hi\"\n\n#define SCNd32       \"ld\"\n#define SCNi32       \"li\"\n#define SCNdLEAST32  \"ld\"\n#define SCNiLEAST32  \"li\"\n#define SCNdFAST32   \"ld\"\n#define SCNiFAST32   \"li\"\n\n#define SCNd64       \"I64d\"\n#define SCNi64       \"I64i\"\n#define SCNdLEAST64  \"I64d\"\n#define SCNiLEAST64  \"I64i\"\n#define SCNdFAST64   \"I64d\"\n#define SCNiFAST64   \"I64i\"\n\n#define SCNdMAX     \"I64d\"\n#define SCNiMAX     \"I64i\"\n\n#ifdef _WIN64 // [\n#  define SCNdPTR     \"I64d\"\n#  define SCNiPTR     \"I64i\"\n#else  // _WIN64 ][\n#  define SCNdPTR     \"ld\"\n#  define SCNiPTR     \"li\"\n#endif  // _WIN64 ]\n\n// The fscanf macros for unsigned integers are:\n#define SCNo8       \"o\"\n#define SCNu8       \"u\"\n#define SCNx8       \"x\"\n#define SCNX8       \"X\"\n#define SCNoLEAST8  \"o\"\n#define SCNuLEAST8  \"u\"\n#define SCNxLEAST8  \"x\"\n#define SCNXLEAST8  \"X\"\n#define SCNoFAST8   \"o\"\n#define SCNuFAST8   \"u\"\n#define SCNxFAST8   \"x\"\n#define SCNXFAST8   \"X\"\n\n#define SCNo16       \"ho\"\n#define SCNu16       \"hu\"\n#define SCNx16       \"hx\"\n#define SCNX16       \"hX\"\n#define SCNoLEAST16  \"ho\"\n#define SCNuLEAST16  \"hu\"\n#define SCNxLEAST16  \"hx\"\n#define SCNXLEAST16  \"hX\"\n#define SCNoFAST16   \"ho\"\n#define SCNuFAST16   \"hu\"\n#define SCNxFAST16   \"hx\"\n#define SCNXFAST16   \"hX\"\n\n#define SCNo32       \"lo\"\n#define SCNu32       \"lu\"\n#define SCNx32       \"lx\"\n#define SCNX32       \"lX\"\n#define SCNoLEAST32  \"lo\"\n#define SCNuLEAST32  \"lu\"\n#define SCNxLEAST32  \"lx\"\n#define SCNXLEAST32  \"lX\"\n#define SCNoFAST32   \"lo\"\n#define SCNuFAST32   \"lu\"\n#define SCNxFAST32   \"lx\"\n#define SCNXFAST32   \"lX\"\n\n#define SCNo64       \"I64o\"\n#define SCNu64       \"I64u\"\n#define SCNx64       \"I64x\"\n#define SCNX64       \"I64X\"\n#define SCNoLEAST64  \"I64o\"\n#define SCNuLEAST64  \"I64u\"\n#define 
SCNxLEAST64  \"I64x\"\n#define SCNXLEAST64  \"I64X\"\n#define SCNoFAST64   \"I64o\"\n#define SCNuFAST64   \"I64u\"\n#define SCNxFAST64   \"I64x\"\n#define SCNXFAST64   \"I64X\"\n\n#define SCNoMAX     \"I64o\"\n#define SCNuMAX     \"I64u\"\n#define SCNxMAX     \"I64x\"\n#define SCNXMAX     \"I64X\"\n\n#ifdef _WIN64 // [\n#  define SCNoPTR     \"I64o\"\n#  define SCNuPTR     \"I64u\"\n#  define SCNxPTR     \"I64x\"\n#  define SCNXPTR     \"I64X\"\n#else  // _WIN64 ][\n#  define SCNoPTR     \"lo\"\n#  define SCNuPTR     \"lu\"\n#  define SCNxPTR     \"lx\"\n#  define SCNXPTR     \"lX\"\n#endif  // _WIN64 ]\n\n#endif // __STDC_FORMAT_MACROS ]\n\n// 7.8.2 Functions for greatest-width integer types\n\n// 7.8.2.1 The imaxabs function\n#define imaxabs _abs64\n\n// 7.8.2.2 The imaxdiv function\n\n// This is modified version of div() function from Microsoft's div.c found\n// in %MSVC.NET%\\crt\\src\\div.c\n#ifdef STATIC_IMAXDIV // [\nstatic\n#else // STATIC_IMAXDIV ][\n_inline\n#endif // STATIC_IMAXDIV ]\nimaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)\n{\n   imaxdiv_t result;\n\n   result.quot = numer / denom;\n   result.rem = numer % denom;\n\n   if (numer < 0 && result.rem > 0) {\n      // did division wrong; must fix up\n      ++result.quot;\n      result.rem -= denom;\n   }\n\n   return result;\n}\n\n// 7.8.2.3 The strtoimax and strtoumax functions\n#define strtoimax _strtoi64\n#define strtoumax _strtoui64\n\n// 7.8.2.4 The wcstoimax and wcstoumax functions\n#define wcstoimax _wcstoi64\n#define wcstoumax _wcstoui64\n\n\n#endif // _MSC_INTTYPES_H_ ]\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/avcodec.h",
    "content": "/*\n * copyright (c) 2001 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_AVCODEC_H\n#define AVCODEC_AVCODEC_H\n\n/**\n * @file\n * @ingroup libavc\n * Libavcodec external API header\n */\n\n#include <errno.h>\n#include \"libavutil/samplefmt.h\"\n#include \"libavutil/attributes.h\"\n#include \"libavutil/avutil.h\"\n#include \"libavutil/buffer.h\"\n#include \"libavutil/cpu.h\"\n#include \"libavutil/channel_layout.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/frame.h\"\n#include \"libavutil/log.h\"\n#include \"libavutil/pixfmt.h\"\n#include \"libavutil/rational.h\"\n\n#include \"version.h\"\n\n#if FF_API_FAST_MALLOC\n// to provide fast_*alloc\n#include \"libavutil/mem.h\"\n#endif\n\n/**\n * @defgroup libavc Encoding/Decoding Library\n * @{\n *\n * @defgroup lavc_decoding Decoding\n * @{\n * @}\n *\n * @defgroup lavc_encoding Encoding\n * @{\n * @}\n *\n * @defgroup lavc_codec Codecs\n * @{\n * @defgroup lavc_codec_native Native Codecs\n * @{\n * @}\n * @defgroup lavc_codec_wrappers External library wrappers\n * @{\n * @}\n * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge\n * @{\n * @}\n * @}\n * @defgroup lavc_internal Internal\n * @{\n * @}\n * @}\n *\n */\n\n/**\n * @defgroup lavc_core Core functions/structures.\n * @ingroup libavc\n *\n * Basic definitions, functions for querying libavcodec capabilities,\n * allocating core structures, etc.\n * @{\n */\n\n\n/**\n * Identify the syntax and semantics of the bitstream.\n * The principle is roughly:\n * Two decoders with the same ID can decode the same streams.\n * Two encoders with the same ID can encode compatible streams.\n * There may be slight deviations from the principle due to implementation\n * details.\n *\n * If you add a codec ID to this list, add it so that\n * 1. no value of a existing codec ID changes (that would break ABI),\n * 2. 
Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec.\n *    This ensures that 2 forks can independently add AVCodecIDs without producing conflicts.\n *\n * After adding new codec IDs, do not forget to add an entry to the codec\n * descriptor list and bump libavcodec minor version.\n */\nenum AVCodecID {\n    AV_CODEC_ID_NONE,\n\n    /* video codecs */\n    AV_CODEC_ID_MPEG1VIDEO,\n    AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding\n#if FF_API_XVMC\n    AV_CODEC_ID_MPEG2VIDEO_XVMC,\n#endif /* FF_API_XVMC */\n    AV_CODEC_ID_H261,\n    AV_CODEC_ID_H263,\n    AV_CODEC_ID_RV10,\n    AV_CODEC_ID_RV20,\n    AV_CODEC_ID_MJPEG,\n    AV_CODEC_ID_MJPEGB,\n    AV_CODEC_ID_LJPEG,\n    AV_CODEC_ID_SP5X,\n    AV_CODEC_ID_JPEGLS,\n    AV_CODEC_ID_MPEG4,\n    AV_CODEC_ID_RAWVIDEO,\n    AV_CODEC_ID_MSMPEG4V1,\n    AV_CODEC_ID_MSMPEG4V2,\n    AV_CODEC_ID_MSMPEG4V3,\n    AV_CODEC_ID_WMV1,\n    AV_CODEC_ID_WMV2,\n    AV_CODEC_ID_H263P,\n    AV_CODEC_ID_H263I,\n    AV_CODEC_ID_FLV1,\n    AV_CODEC_ID_SVQ1,\n    AV_CODEC_ID_SVQ3,\n    AV_CODEC_ID_DVVIDEO,\n    AV_CODEC_ID_HUFFYUV,\n    AV_CODEC_ID_CYUV,\n    AV_CODEC_ID_H264,\n    AV_CODEC_ID_INDEO3,\n    AV_CODEC_ID_VP3,\n    AV_CODEC_ID_THEORA,\n    AV_CODEC_ID_ASV1,\n    AV_CODEC_ID_ASV2,\n    AV_CODEC_ID_FFV1,\n    AV_CODEC_ID_4XM,\n    AV_CODEC_ID_VCR1,\n    AV_CODEC_ID_CLJR,\n    AV_CODEC_ID_MDEC,\n    AV_CODEC_ID_ROQ,\n    AV_CODEC_ID_INTERPLAY_VIDEO,\n    AV_CODEC_ID_XAN_WC3,\n    AV_CODEC_ID_XAN_WC4,\n    AV_CODEC_ID_RPZA,\n    AV_CODEC_ID_CINEPAK,\n    AV_CODEC_ID_WS_VQA,\n    AV_CODEC_ID_MSRLE,\n    AV_CODEC_ID_MSVIDEO1,\n    AV_CODEC_ID_IDCIN,\n    AV_CODEC_ID_8BPS,\n    AV_CODEC_ID_SMC,\n    AV_CODEC_ID_FLIC,\n    AV_CODEC_ID_TRUEMOTION1,\n    AV_CODEC_ID_VMDVIDEO,\n    AV_CODEC_ID_MSZH,\n    AV_CODEC_ID_ZLIB,\n    AV_CODEC_ID_QTRLE,\n    AV_CODEC_ID_TSCC,\n    AV_CODEC_ID_ULTI,\n    AV_CODEC_ID_QDRAW,\n    AV_CODEC_ID_VIXL,\n    AV_CODEC_ID_QPEG,\n    AV_CODEC_ID_PNG,\n    AV_CODEC_ID_PPM,\n    AV_CODEC_ID_PBM,\n    AV_CODEC_ID_PGM,\n    AV_CODEC_ID_PGMYUV,\n    AV_CODEC_ID_PAM,\n    AV_CODEC_ID_FFVHUFF,\n    AV_CODEC_ID_RV30,\n    AV_CODEC_ID_RV40,\n    AV_CODEC_ID_VC1,\n    AV_CODEC_ID_WMV3,\n    AV_CODEC_ID_LOCO,\n    AV_CODEC_ID_WNV1,\n    AV_CODEC_ID_AASC,\n    AV_CODEC_ID_INDEO2,\n    AV_CODEC_ID_FRAPS,\n    AV_CODEC_ID_TRUEMOTION2,\n    AV_CODEC_ID_BMP,\n    AV_CODEC_ID_CSCD,\n    AV_CODEC_ID_MMVIDEO,\n    AV_CODEC_ID_ZMBV,\n    AV_CODEC_ID_AVS,\n    AV_CODEC_ID_SMACKVIDEO,\n    AV_CODEC_ID_NUV,\n    AV_CODEC_ID_KMVC,\n    AV_CODEC_ID_FLASHSV,\n    AV_CODEC_ID_CAVS,\n    AV_CODEC_ID_JPEG2000,\n    AV_CODEC_ID_VMNC,\n    AV_CODEC_ID_VP5,\n    AV_CODEC_ID_VP6,\n    AV_CODEC_ID_VP6F,\n    AV_CODEC_ID_TARGA,\n    AV_CODEC_ID_DSICINVIDEO,\n    AV_CODEC_ID_TIERTEXSEQVIDEO,\n    AV_CODEC_ID_TIFF,\n    AV_CODEC_ID_GIF,\n    AV_CODEC_ID_DXA,\n    AV_CODEC_ID_DNXHD,\n    AV_CODEC_ID_THP,\n    AV_CODEC_ID_SGI,\n    AV_CODEC_ID_C93,\n    AV_CODEC_ID_BETHSOFTVID,\n    AV_CODEC_ID_PTX,\n    AV_CODEC_ID_TXD,\n    AV_CODEC_ID_VP6A,\n    AV_CODEC_ID_AMV,\n    AV_CODEC_ID_VB,\n    AV_CODEC_ID_PCX,\n    AV_CODEC_ID_SUNRAST,\n    AV_CODEC_ID_INDEO4,\n    AV_CODEC_ID_INDEO5,\n    AV_CODEC_ID_MIMIC,\n    AV_CODEC_ID_RL2,\n    AV_CODEC_ID_ESCAPE124,\n    AV_CODEC_ID_DIRAC,\n    AV_CODEC_ID_BFI,\n    AV_CODEC_ID_CMV,\n    AV_CODEC_ID_MOTIONPIXELS,\n    AV_CODEC_ID_TGV,\n    AV_CODEC_ID_TGQ,\n    AV_CODEC_ID_TQI,\n    AV_CODEC_ID_AURA,\n    AV_CODEC_ID_AURA2,\n    AV_CODEC_ID_V210X,\n    
AV_CODEC_ID_TMV,\n    AV_CODEC_ID_V210,\n    AV_CODEC_ID_DPX,\n    AV_CODEC_ID_MAD,\n    AV_CODEC_ID_FRWU,\n    AV_CODEC_ID_FLASHSV2,\n    AV_CODEC_ID_CDGRAPHICS,\n    AV_CODEC_ID_R210,\n    AV_CODEC_ID_ANM,\n    AV_CODEC_ID_BINKVIDEO,\n    AV_CODEC_ID_IFF_ILBM,\n    AV_CODEC_ID_IFF_BYTERUN1,\n    AV_CODEC_ID_KGV1,\n    AV_CODEC_ID_YOP,\n    AV_CODEC_ID_VP8,\n    AV_CODEC_ID_PICTOR,\n    AV_CODEC_ID_ANSI,\n    AV_CODEC_ID_A64_MULTI,\n    AV_CODEC_ID_A64_MULTI5,\n    AV_CODEC_ID_R10K,\n    AV_CODEC_ID_MXPEG,\n    AV_CODEC_ID_LAGARITH,\n    AV_CODEC_ID_PRORES,\n    AV_CODEC_ID_JV,\n    AV_CODEC_ID_DFA,\n    AV_CODEC_ID_WMV3IMAGE,\n    AV_CODEC_ID_VC1IMAGE,\n    AV_CODEC_ID_UTVIDEO,\n    AV_CODEC_ID_BMV_VIDEO,\n    AV_CODEC_ID_VBLE,\n    AV_CODEC_ID_DXTORY,\n    AV_CODEC_ID_V410,\n    AV_CODEC_ID_XWD,\n    AV_CODEC_ID_CDXL,\n    AV_CODEC_ID_XBM,\n    AV_CODEC_ID_ZEROCODEC,\n    AV_CODEC_ID_MSS1,\n    AV_CODEC_ID_MSA1,\n    AV_CODEC_ID_TSCC2,\n    AV_CODEC_ID_MTS2,\n    AV_CODEC_ID_CLLC,\n    AV_CODEC_ID_MSS2,\n    AV_CODEC_ID_VP9,\n    AV_CODEC_ID_AIC,\n    AV_CODEC_ID_ESCAPE130_DEPRECATED,\n    AV_CODEC_ID_G2M_DEPRECATED,\n    AV_CODEC_ID_WEBP_DEPRECATED,\n    AV_CODEC_ID_HNM4_VIDEO,\n    AV_CODEC_ID_HEVC_DEPRECATED,\n    AV_CODEC_ID_FIC,\n\n    AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'),\n    AV_CODEC_ID_Y41P       = MKBETAG('Y','4','1','P'),\n    AV_CODEC_ID_ESCAPE130  = MKBETAG('E','1','3','0'),\n    AV_CODEC_ID_EXR        = MKBETAG('0','E','X','R'),\n    AV_CODEC_ID_AVRP       = MKBETAG('A','V','R','P'),\n\n    AV_CODEC_ID_012V       = MKBETAG('0','1','2','V'),\n    AV_CODEC_ID_G2M        = MKBETAG( 0 ,'G','2','M'),\n    AV_CODEC_ID_AVUI       = MKBETAG('A','V','U','I'),\n    AV_CODEC_ID_AYUV       = MKBETAG('A','Y','U','V'),\n    AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'),\n    AV_CODEC_ID_V308       = MKBETAG('V','3','0','8'),\n    AV_CODEC_ID_V408       = MKBETAG('V','4','0','8'),\n    AV_CODEC_ID_YUV4       = MKBETAG('Y','U','V','4'),\n    AV_CODEC_ID_SANM       = MKBETAG('S','A','N','M'),\n    AV_CODEC_ID_PAF_VIDEO  = MKBETAG('P','A','F','V'),\n    AV_CODEC_ID_AVRN       = MKBETAG('A','V','R','n'),\n    AV_CODEC_ID_CPIA       = MKBETAG('C','P','I','A'),\n    AV_CODEC_ID_XFACE      = MKBETAG('X','F','A','C'),\n    AV_CODEC_ID_SGIRLE     = MKBETAG('S','G','I','R'),\n    AV_CODEC_ID_MVC1       = MKBETAG('M','V','C','1'),\n    AV_CODEC_ID_MVC2       = MKBETAG('M','V','C','2'),\n    AV_CODEC_ID_SNOW       = MKBETAG('S','N','O','W'),\n    AV_CODEC_ID_WEBP       = MKBETAG('W','E','B','P'),\n    AV_CODEC_ID_SMVJPEG    = MKBETAG('S','M','V','J'),\n    AV_CODEC_ID_HEVC       = MKBETAG('H','2','6','5'),\n#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC\n\n    /* various PCM \"codecs\" */\n    AV_CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs\n    AV_CODEC_ID_PCM_S16LE = 0x10000,\n    AV_CODEC_ID_PCM_S16BE,\n    AV_CODEC_ID_PCM_U16LE,\n    AV_CODEC_ID_PCM_U16BE,\n    AV_CODEC_ID_PCM_S8,\n    AV_CODEC_ID_PCM_U8,\n    AV_CODEC_ID_PCM_MULAW,\n    AV_CODEC_ID_PCM_ALAW,\n    AV_CODEC_ID_PCM_S32LE,\n    AV_CODEC_ID_PCM_S32BE,\n    AV_CODEC_ID_PCM_U32LE,\n    AV_CODEC_ID_PCM_U32BE,\n    AV_CODEC_ID_PCM_S24LE,\n    AV_CODEC_ID_PCM_S24BE,\n    AV_CODEC_ID_PCM_U24LE,\n    AV_CODEC_ID_PCM_U24BE,\n    AV_CODEC_ID_PCM_S24DAUD,\n    AV_CODEC_ID_PCM_ZORK,\n    AV_CODEC_ID_PCM_S16LE_PLANAR,\n    AV_CODEC_ID_PCM_DVD,\n    AV_CODEC_ID_PCM_F32BE,\n    AV_CODEC_ID_PCM_F32LE,\n    AV_CODEC_ID_PCM_F64BE,\n    AV_CODEC_ID_PCM_F64LE,\n    AV_CODEC_ID_PCM_BLURAY,\n    
AV_CODEC_ID_PCM_LXF,\n    AV_CODEC_ID_S302M,\n    AV_CODEC_ID_PCM_S8_PLANAR,\n    AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED,\n    AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED,\n    AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'),\n    AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'),\n    AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16),\n\n    /* various ADPCM codecs */\n    AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,\n    AV_CODEC_ID_ADPCM_IMA_WAV,\n    AV_CODEC_ID_ADPCM_IMA_DK3,\n    AV_CODEC_ID_ADPCM_IMA_DK4,\n    AV_CODEC_ID_ADPCM_IMA_WS,\n    AV_CODEC_ID_ADPCM_IMA_SMJPEG,\n    AV_CODEC_ID_ADPCM_MS,\n    AV_CODEC_ID_ADPCM_4XM,\n    AV_CODEC_ID_ADPCM_XA,\n    AV_CODEC_ID_ADPCM_ADX,\n    AV_CODEC_ID_ADPCM_EA,\n    AV_CODEC_ID_ADPCM_G726,\n    AV_CODEC_ID_ADPCM_CT,\n    AV_CODEC_ID_ADPCM_SWF,\n    AV_CODEC_ID_ADPCM_YAMAHA,\n    AV_CODEC_ID_ADPCM_SBPRO_4,\n    AV_CODEC_ID_ADPCM_SBPRO_3,\n    AV_CODEC_ID_ADPCM_SBPRO_2,\n    AV_CODEC_ID_ADPCM_THP,\n    AV_CODEC_ID_ADPCM_IMA_AMV,\n    AV_CODEC_ID_ADPCM_EA_R1,\n    AV_CODEC_ID_ADPCM_EA_R3,\n    AV_CODEC_ID_ADPCM_EA_R2,\n    AV_CODEC_ID_ADPCM_IMA_EA_SEAD,\n    AV_CODEC_ID_ADPCM_IMA_EA_EACS,\n    AV_CODEC_ID_ADPCM_EA_XAS,\n    AV_CODEC_ID_ADPCM_EA_MAXIS_XA,\n    AV_CODEC_ID_ADPCM_IMA_ISS,\n    AV_CODEC_ID_ADPCM_G722,\n    AV_CODEC_ID_ADPCM_IMA_APC,\n    AV_CODEC_ID_VIMA       = MKBETAG('V','I','M','A'),\n    AV_CODEC_ID_ADPCM_AFC  = MKBETAG('A','F','C',' '),\n    AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '),\n    AV_CODEC_ID_ADPCM_DTK  = MKBETAG('D','T','K',' '),\n    AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '),\n    AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'),\n\n    /* AMR */\n    AV_CODEC_ID_AMR_NB = 0x12000,\n    AV_CODEC_ID_AMR_WB,\n\n    /* RealAudio codecs*/\n    AV_CODEC_ID_RA_144 = 0x13000,\n    AV_CODEC_ID_RA_288,\n\n    /* various DPCM codecs */\n    AV_CODEC_ID_ROQ_DPCM = 0x14000,\n    AV_CODEC_ID_INTERPLAY_DPCM,\n    AV_CODEC_ID_XAN_DPCM,\n    AV_CODEC_ID_SOL_DPCM,\n\n    /* audio codecs */\n    AV_CODEC_ID_MP2 = 0x15000,\n    AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3\n    AV_CODEC_ID_AAC,\n    AV_CODEC_ID_AC3,\n    AV_CODEC_ID_DTS,\n    AV_CODEC_ID_VORBIS,\n    AV_CODEC_ID_DVAUDIO,\n    AV_CODEC_ID_WMAV1,\n    AV_CODEC_ID_WMAV2,\n    AV_CODEC_ID_MACE3,\n    AV_CODEC_ID_MACE6,\n    AV_CODEC_ID_VMDAUDIO,\n    AV_CODEC_ID_FLAC,\n    AV_CODEC_ID_MP3ADU,\n    AV_CODEC_ID_MP3ON4,\n    AV_CODEC_ID_SHORTEN,\n    AV_CODEC_ID_ALAC,\n    AV_CODEC_ID_WESTWOOD_SND1,\n    AV_CODEC_ID_GSM, ///< as in Berlin toast format\n    AV_CODEC_ID_QDM2,\n    AV_CODEC_ID_COOK,\n    AV_CODEC_ID_TRUESPEECH,\n    AV_CODEC_ID_TTA,\n    AV_CODEC_ID_SMACKAUDIO,\n    AV_CODEC_ID_QCELP,\n    AV_CODEC_ID_WAVPACK,\n    AV_CODEC_ID_DSICINAUDIO,\n    AV_CODEC_ID_IMC,\n    AV_CODEC_ID_MUSEPACK7,\n    AV_CODEC_ID_MLP,\n    AV_CODEC_ID_GSM_MS, /* as found in WAV */\n    AV_CODEC_ID_ATRAC3,\n#if FF_API_VOXWARE\n    AV_CODEC_ID_VOXWARE,\n#endif\n    AV_CODEC_ID_APE,\n    AV_CODEC_ID_NELLYMOSER,\n    AV_CODEC_ID_MUSEPACK8,\n    AV_CODEC_ID_SPEEX,\n    AV_CODEC_ID_WMAVOICE,\n    AV_CODEC_ID_WMAPRO,\n    AV_CODEC_ID_WMALOSSLESS,\n    AV_CODEC_ID_ATRAC3P,\n    AV_CODEC_ID_EAC3,\n    AV_CODEC_ID_SIPR,\n    AV_CODEC_ID_MP1,\n    AV_CODEC_ID_TWINVQ,\n    AV_CODEC_ID_TRUEHD,\n    AV_CODEC_ID_MP4ALS,\n    AV_CODEC_ID_ATRAC1,\n    AV_CODEC_ID_BINKAUDIO_RDFT,\n    AV_CODEC_ID_BINKAUDIO_DCT,\n    AV_CODEC_ID_AAC_LATM,\n    AV_CODEC_ID_QDMC,\n    AV_CODEC_ID_CELT,\n    AV_CODEC_ID_G723_1,\n    AV_CODEC_ID_G729,\n    
AV_CODEC_ID_8SVX_EXP,\n    AV_CODEC_ID_8SVX_FIB,\n    AV_CODEC_ID_BMV_AUDIO,\n    AV_CODEC_ID_RALF,\n    AV_CODEC_ID_IAC,\n    AV_CODEC_ID_ILBC,\n    AV_CODEC_ID_OPUS_DEPRECATED,\n    AV_CODEC_ID_COMFORT_NOISE,\n    AV_CODEC_ID_TAK_DEPRECATED,\n    AV_CODEC_ID_METASOUND,\n    AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),\n    AV_CODEC_ID_SONIC       = MKBETAG('S','O','N','C'),\n    AV_CODEC_ID_SONIC_LS    = MKBETAG('S','O','N','L'),\n    AV_CODEC_ID_PAF_AUDIO   = MKBETAG('P','A','F','A'),\n    AV_CODEC_ID_OPUS        = MKBETAG('O','P','U','S'),\n    AV_CODEC_ID_TAK         = MKBETAG('t','B','a','K'),\n    AV_CODEC_ID_EVRC        = MKBETAG('s','e','v','c'),\n    AV_CODEC_ID_SMV         = MKBETAG('s','s','m','v'),\n\n    /* subtitle codecs */\n    AV_CODEC_ID_FIRST_SUBTITLE = 0x17000,          ///< A dummy ID pointing at the start of subtitle codecs.\n    AV_CODEC_ID_DVD_SUBTITLE = 0x17000,\n    AV_CODEC_ID_DVB_SUBTITLE,\n    AV_CODEC_ID_TEXT,  ///< raw UTF-8 text\n    AV_CODEC_ID_XSUB,\n    AV_CODEC_ID_SSA,\n    AV_CODEC_ID_MOV_TEXT,\n    AV_CODEC_ID_HDMV_PGS_SUBTITLE,\n    AV_CODEC_ID_DVB_TELETEXT,\n    AV_CODEC_ID_SRT,\n    AV_CODEC_ID_MICRODVD   = MKBETAG('m','D','V','D'),\n    AV_CODEC_ID_EIA_608    = MKBETAG('c','6','0','8'),\n    AV_CODEC_ID_JACOSUB    = MKBETAG('J','S','U','B'),\n    AV_CODEC_ID_SAMI       = MKBETAG('S','A','M','I'),\n    AV_CODEC_ID_REALTEXT   = MKBETAG('R','T','X','T'),\n    AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'),\n    AV_CODEC_ID_SUBVIEWER  = MKBETAG('S','u','b','V'),\n    AV_CODEC_ID_SUBRIP     = MKBETAG('S','R','i','p'),\n    AV_CODEC_ID_WEBVTT     = MKBETAG('W','V','T','T'),\n    AV_CODEC_ID_MPL2       = MKBETAG('M','P','L','2'),\n    AV_CODEC_ID_VPLAYER    = MKBETAG('V','P','l','r'),\n    AV_CODEC_ID_PJS        = MKBETAG('P','h','J','S'),\n    AV_CODEC_ID_ASS        = MKBETAG('A','S','S',' '),  ///< ASS as defined in Matroska\n\n    /* other specific kind of codecs (generally used for attachments) */\n    AV_CODEC_ID_FIRST_UNKNOWN = 0x18000,           ///< A dummy ID pointing at the start of various fake codecs.\n    AV_CODEC_ID_TTF = 0x18000,\n    AV_CODEC_ID_BINTEXT    = MKBETAG('B','T','X','T'),\n    AV_CODEC_ID_XBIN       = MKBETAG('X','B','I','N'),\n    AV_CODEC_ID_IDF        = MKBETAG( 0 ,'I','D','F'),\n    AV_CODEC_ID_OTF        = MKBETAG( 0 ,'O','T','F'),\n    AV_CODEC_ID_SMPTE_KLV  = MKBETAG('K','L','V','A'),\n    AV_CODEC_ID_DVD_NAV    = MKBETAG('D','N','A','V'),\n    AV_CODEC_ID_TIMED_ID3  = MKBETAG('T','I','D','3'),\n\n\n    AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it\n\n    AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS\n                                * stream (only used by libavformat) */\n    AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems\n                                * stream (only used by libavformat) */\n    AV_CODEC_ID_FFMETADATA = 0x21000,   ///< Dummy codec for streams containing only metadata information.\n\n#if FF_API_CODEC_ID\n#include \"old_codec_ids.h\"\n#endif\n};\n\n/**\n * This struct describes the properties of a single codec described by an\n * AVCodecID.\n * @see avcodec_get_descriptor()\n */\ntypedef struct AVCodecDescriptor {\n    enum AVCodecID     id;\n    enum AVMediaType type;\n    /**\n     * Name of the codec described by this descriptor. It is non-empty and\n     * unique for each codec descriptor. 
It should contain alphanumeric\n     * characters and '_' only.\n     */\n    const char      *name;\n    /**\n     * A more descriptive name for this codec. May be NULL.\n     */\n    const char *long_name;\n    /**\n     * Codec properties, a combination of AV_CODEC_PROP_* flags.\n     */\n    int             props;\n} AVCodecDescriptor;\n\n/**\n * Codec uses only intra compression.\n * Video codecs only.\n */\n#define AV_CODEC_PROP_INTRA_ONLY    (1 << 0)\n/**\n * Codec supports lossy compression. Audio and video codecs only.\n * @note a codec may support both lossy and lossless\n * compression modes\n */\n#define AV_CODEC_PROP_LOSSY         (1 << 1)\n/**\n * Codec supports lossless compression. Audio and video codecs only.\n */\n#define AV_CODEC_PROP_LOSSLESS      (1 << 2)\n/**\n * Subtitle codec is bitmap based\n * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.\n */\n#define AV_CODEC_PROP_BITMAP_SUB    (1 << 16)\n/**\n * Subtitle codec is text based.\n * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.\n */\n#define AV_CODEC_PROP_TEXT_SUB      (1 << 17)\n\n/**\n * @ingroup lavc_decoding\n * Required number of additionally allocated bytes at the end of the input bitstream for decoding.\n * This is mainly needed because some optimized bitstream readers read\n * 32 or 64 bit at once and could read over the end.<br>\n * Note: If the first 23 bits of the additional bytes are not 0, then damaged\n * MPEG bitstreams could cause overread and segfault.\n */\n#define FF_INPUT_BUFFER_PADDING_SIZE 16\n\n/**\n * @ingroup lavc_encoding\n * minimum encoding buffer size\n * Used to avoid some checks during header writing.\n */\n#define FF_MIN_BUFFER_SIZE 16384\n\n\n/**\n * @ingroup lavc_encoding\n * motion estimation type.\n */\nenum Motion_Est_ID {\n    ME_ZERO = 1,    ///< no search, that is use 0,0 vector whenever one is needed\n    ME_FULL,\n    ME_LOG,\n    ME_PHODS,\n    ME_EPZS,        ///< enhanced predictive zonal search\n    ME_X1,          ///< reserved for experiments\n    ME_HEX,         ///< hexagon based search\n    ME_UMH,         ///< uneven multi-hexagon search\n    ME_TESA,        ///< transformed exhaustive search algorithm\n    ME_ITER=50,     ///< iterative search\n};\n\n/**\n * @ingroup lavc_decoding\n */\nenum AVDiscard{\n    /* We leave some space between them for extensions (drop some\n     * keyframes for intra-only or drop just some bidir frames). 
*/\n    AVDISCARD_NONE    =-16, ///< discard nothing\n    AVDISCARD_DEFAULT =  0, ///< discard useless packets like 0 size packets in avi\n    AVDISCARD_NONREF  =  8, ///< discard all non reference\n    AVDISCARD_BIDIR   = 16, ///< discard all bidirectional frames\n    AVDISCARD_NONKEY  = 32, ///< discard all frames except keyframes\n    AVDISCARD_ALL     = 48, ///< discard all\n};\n\nenum AVColorPrimaries{\n    AVCOL_PRI_BT709       = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B\n    AVCOL_PRI_UNSPECIFIED = 2,\n    AVCOL_PRI_BT470M      = 4,\n    AVCOL_PRI_BT470BG     = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM\n    AVCOL_PRI_SMPTE170M   = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC\n    AVCOL_PRI_SMPTE240M   = 7, ///< functionally identical to above\n    AVCOL_PRI_FILM        = 8,\n    AVCOL_PRI_BT2020      = 9, ///< ITU-R BT2020\n    AVCOL_PRI_NB             , ///< Not part of ABI\n};\n\nenum AVColorTransferCharacteristic{\n    AVCOL_TRC_BT709        =  1, ///< also ITU-R BT1361\n    AVCOL_TRC_UNSPECIFIED  =  2,\n    AVCOL_TRC_GAMMA22      =  4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM\n    AVCOL_TRC_GAMMA28      =  5, ///< also ITU-R BT470BG\n    AVCOL_TRC_SMPTE170M    =  6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC\n    AVCOL_TRC_SMPTE240M    =  7,\n    AVCOL_TRC_LINEAR       =  8, ///< \"Linear transfer characteristics\"\n    AVCOL_TRC_LOG          =  9, ///< \"Logarithmic transfer characteristic (100:1 range)\"\n    AVCOL_TRC_LOG_SQRT     = 10, ///< \"Logarithmic transfer characteristic (100 * Sqrt( 10 ) : 1 range)\"\n    AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4\n    AVCOL_TRC_BT1361_ECG   = 12, ///< ITU-R BT1361 Extended Colour Gamut\n    AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)\n    AVCOL_TRC_BT2020_10    = 14, ///< ITU-R BT2020 for 10 bit system\n    AVCOL_TRC_BT2020_12    = 15, ///< ITU-R BT2020 for 12 bit system\n    AVCOL_TRC_NB               , ///< Not part of ABI\n};\n\n/**\n *  X   X      3 4 X      X are luma samples,\n *             1 2        1-6 are possible chroma positions\n *  X   X      5 6 X      0 is undefined/unknown position\n */\nenum AVChromaLocation{\n    AVCHROMA_LOC_UNSPECIFIED = 0,\n    AVCHROMA_LOC_LEFT        = 1, ///< mpeg2/4, h264 default\n    AVCHROMA_LOC_CENTER      = 2, ///< mpeg1, jpeg, h263\n    AVCHROMA_LOC_TOPLEFT     = 3, ///< DV\n    AVCHROMA_LOC_TOP         = 4,\n    AVCHROMA_LOC_BOTTOMLEFT  = 5,\n    AVCHROMA_LOC_BOTTOM      = 6,\n    AVCHROMA_LOC_NB             , ///< Not part of ABI\n};\n\nenum AVAudioServiceType {\n    AV_AUDIO_SERVICE_TYPE_MAIN              = 0,\n    AV_AUDIO_SERVICE_TYPE_EFFECTS           = 1,\n    AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,\n    AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED  = 3,\n    AV_AUDIO_SERVICE_TYPE_DIALOGUE          = 4,\n    AV_AUDIO_SERVICE_TYPE_COMMENTARY        = 5,\n    AV_AUDIO_SERVICE_TYPE_EMERGENCY         = 6,\n    AV_AUDIO_SERVICE_TYPE_VOICE_OVER        = 7,\n    AV_AUDIO_SERVICE_TYPE_KARAOKE           = 8,\n    AV_AUDIO_SERVICE_TYPE_NB                   , ///< Not part of ABI\n};\n\n/**\n * @ingroup lavc_encoding\n */\ntypedef struct RcOverride{\n    int start_frame;\n    int end_frame;\n    int qscale; // If this is 0 then quality_factor will be used instead.\n    float quality_factor;\n} RcOverride;\n\n#if FF_API_MAX_BFRAMES\n/**\n * @deprecated there is no libavcodec-wide limit on the number of B-frames\n */\n#define 
FF_MAX_B_FRAMES 16\n#endif\n\n/* encoding support\n   These flags can be passed in AVCodecContext.flags before initialization.\n   Note: Not everything is supported yet.\n*/\n\n/**\n * Allow decoders to produce frames with data planes that are not aligned\n * to CPU requirements (e.g. due to cropping).\n */\n#define CODEC_FLAG_UNALIGNED 0x0001\n#define CODEC_FLAG_QSCALE 0x0002  ///< Use fixed qscale.\n#define CODEC_FLAG_4MV    0x0004  ///< 4 MV per MB allowed / advanced prediction for H.263.\n#define CODEC_FLAG_OUTPUT_CORRUPT 0x0008 ///< Output even those frames that might be corrupted\n#define CODEC_FLAG_QPEL   0x0010  ///< Use qpel MC.\n#define CODEC_FLAG_GMC    0x0020  ///< Use GMC.\n#define CODEC_FLAG_MV0    0x0040  ///< Always try a MB with MV=<0,0>.\n/**\n * The parent program guarantees that the input for B-frames containing\n * streams is not written to for at least s->max_b_frames+1 frames, if\n * this is not set the input will be copied.\n */\n#define CODEC_FLAG_INPUT_PRESERVED 0x0100\n#define CODEC_FLAG_PASS1           0x0200   ///< Use internal 2pass ratecontrol in first pass mode.\n#define CODEC_FLAG_PASS2           0x0400   ///< Use internal 2pass ratecontrol in second pass mode.\n#define CODEC_FLAG_GRAY            0x2000   ///< Only decode/encode grayscale.\n#if FF_API_EMU_EDGE\n/**\n * @deprecated edges are not used/required anymore. I.e. this flag is now always\n * set.\n */\n#define CODEC_FLAG_EMU_EDGE        0x4000\n#endif\n#define CODEC_FLAG_PSNR            0x8000   ///< error[?] variables will be set during encoding.\n#define CODEC_FLAG_TRUNCATED       0x00010000 /** Input bitstream might be truncated at a random\n                                                  location instead of only at frame boundaries. */\n#define CODEC_FLAG_NORMALIZE_AQP  0x00020000 ///< Normalize adaptive quantization.\n#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.\n#define CODEC_FLAG_LOW_DELAY      0x00080000 ///< Force low delay.\n#define CODEC_FLAG_GLOBAL_HEADER  0x00400000 ///< Place global headers in extradata instead of every keyframe.\n#define CODEC_FLAG_BITEXACT       0x00800000 ///< Use only bitexact stuff (except (I)DCT).\n/* Fx : Flag for h263+ extra options */\n#define CODEC_FLAG_AC_PRED        0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction\n#define CODEC_FLAG_LOOP_FILTER    0x00000800 ///< loop filter\n#define CODEC_FLAG_INTERLACED_ME  0x20000000 ///< interlaced motion estimation\n#define CODEC_FLAG_CLOSED_GOP     0x80000000\n#define CODEC_FLAG2_FAST          0x00000001 ///< Allow non spec compliant speedup tricks.\n#define CODEC_FLAG2_NO_OUTPUT     0x00000004 ///< Skip bitstream encoding.\n#define CODEC_FLAG2_LOCAL_HEADER  0x00000008 ///< Place global headers at every keyframe instead of in extradata.\n#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. 
DEPRECATED!!!!\n#define CODEC_FLAG2_IGNORE_CROP   0x00010000 ///< Discard cropping information from SPS.\n\n#define CODEC_FLAG2_CHUNKS        0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.\n#define CODEC_FLAG2_SHOW_ALL      0x00400000 ///< Show all frames before the first keyframe\n\n/* Unsupported options :\n *              Syntax Arithmetic coding (SAC)\n *              Reference Picture Selection\n *              Independent Segment Decoding */\n/* /Fx */\n/* codec capabilities */\n\n#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.\n/**\n * Codec uses get_buffer() for allocating buffers and supports custom allocators.\n * If not set, it might not use get_buffer() at all or use operations that\n * assume the buffer was allocated by avcodec_default_get_buffer.\n */\n#define CODEC_CAP_DR1             0x0002\n#define CODEC_CAP_TRUNCATED       0x0008\n#if FF_API_XVMC\n/* Codec can export data for HW decoding. This flag indicates that\n * the codec would call get_format() with list that might contain HW accelerated\n * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them\n * including raw image format.\n * The application can use the passed context to determine bitstream version,\n * chroma format, resolution etc.\n */\n#define CODEC_CAP_HWACCEL         0x0010\n#endif /* FF_API_XVMC */\n/**\n * Encoder or decoder requires flushing with NULL input at the end in order to\n * give the complete and correct output.\n *\n * NOTE: If this flag is not set, the codec is guaranteed to never be fed with\n *       with NULL data. The user can still send NULL data to the public encode\n *       or decode function, but libavcodec will not pass it along to the codec\n *       unless this flag is set.\n *\n * Decoders:\n * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,\n * avpkt->size=0 at the end to get the delayed data until the decoder no longer\n * returns frames.\n *\n * Encoders:\n * The encoder needs to be fed with NULL data at the end of encoding until the\n * encoder no longer returns data.\n *\n * NOTE: For encoders implementing the AVCodec.encode2() function, setting this\n *       flag also means that the encoder must set the pts and duration for\n *       each output packet. If this flag is not set, the pts and duration will\n *       be determined by libavcodec from the input frame.\n */\n#define CODEC_CAP_DELAY           0x0020\n/**\n * Codec can be fed a final frame with a smaller size.\n * This can be used to prevent truncation of the last audio samples.\n */\n#define CODEC_CAP_SMALL_LAST_FRAME 0x0040\n#if FF_API_CAP_VDPAU\n/**\n * Codec can export data for HW decoding (VDPAU).\n */\n#define CODEC_CAP_HWACCEL_VDPAU    0x0080\n#endif\n/**\n * Codec can output multiple frames per AVPacket\n * Normally demuxers return one frame at a time, demuxers which do not do\n * are connected to a parser to split what they return into proper frames.\n * This flag is reserved to the very rare category of codecs which have a\n * bitstream that cannot be split into frames without timeconsuming\n * operations like full decoding. Demuxers carring such bitstreams thus\n * may return multiple frames in a packet. 
This has many disadvantages like\n * prohibiting stream copy in many cases thus it should only be considered\n * as a last resort.\n */\n#define CODEC_CAP_SUBFRAMES        0x0100\n/**\n * Codec is experimental and is thus avoided in favor of non experimental\n * encoders\n */\n#define CODEC_CAP_EXPERIMENTAL     0x0200\n/**\n * Codec should fill in channel configuration and samplerate instead of container\n */\n#define CODEC_CAP_CHANNEL_CONF     0x0400\n#if FF_API_NEG_LINESIZES\n/**\n * @deprecated no codecs use this capability\n */\n#define CODEC_CAP_NEG_LINESIZES    0x0800\n#endif\n/**\n * Codec supports frame-level multithreading.\n */\n#define CODEC_CAP_FRAME_THREADS    0x1000\n/**\n * Codec supports slice-based (or partition-based) multithreading.\n */\n#define CODEC_CAP_SLICE_THREADS    0x2000\n/**\n * Codec supports changed parameters at any point.\n */\n#define CODEC_CAP_PARAM_CHANGE     0x4000\n/**\n * Codec supports avctx->thread_count == 0 (auto).\n */\n#define CODEC_CAP_AUTO_THREADS     0x8000\n/**\n * Audio encoder supports receiving a different number of samples in each call.\n */\n#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000\n/**\n * Codec is intra only.\n */\n#define CODEC_CAP_INTRA_ONLY       0x40000000\n/**\n * Codec is lossless.\n */\n#define CODEC_CAP_LOSSLESS         0x80000000\n\n#if FF_API_MB_TYPE\n//The following defines may change, don't expect compatibility if you use them.\n#define MB_TYPE_INTRA4x4   0x0001\n#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific\n#define MB_TYPE_INTRA_PCM  0x0004 //FIXME H.264-specific\n#define MB_TYPE_16x16      0x0008\n#define MB_TYPE_16x8       0x0010\n#define MB_TYPE_8x16       0x0020\n#define MB_TYPE_8x8        0x0040\n#define MB_TYPE_INTERLACED 0x0080\n#define MB_TYPE_DIRECT2    0x0100 //FIXME\n#define MB_TYPE_ACPRED     0x0200\n#define MB_TYPE_GMC        0x0400\n#define MB_TYPE_SKIP       0x0800\n#define MB_TYPE_P0L0       0x1000\n#define MB_TYPE_P1L0       0x2000\n#define MB_TYPE_P0L1       0x4000\n#define MB_TYPE_P1L1       0x8000\n#define MB_TYPE_L0         (MB_TYPE_P0L0 | MB_TYPE_P1L0)\n#define MB_TYPE_L1         (MB_TYPE_P0L1 | MB_TYPE_P1L1)\n#define MB_TYPE_L0L1       (MB_TYPE_L0   | MB_TYPE_L1)\n#define MB_TYPE_QUANT      0x00010000\n#define MB_TYPE_CBP        0x00020000\n//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)\n#endif\n\n/**\n * Pan Scan area.\n * This specifies the area which should be displayed.\n * Note there may be multiple such areas for one frame.\n */\ntypedef struct AVPanScan{\n    /**\n     * id\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    int id;\n\n    /**\n     * width and height in 1/16 pel\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    int width;\n    int height;\n\n    /**\n     * position of the top left corner in 1/16 pel for up to 3 fields/frames\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    int16_t position[3][2];\n}AVPanScan;\n\n#if FF_API_QSCALE_TYPE\n#define FF_QSCALE_TYPE_MPEG1 0\n#define FF_QSCALE_TYPE_MPEG2 1\n#define FF_QSCALE_TYPE_H264  2\n#define FF_QSCALE_TYPE_VP56  3\n#endif\n\n#if FF_API_GET_BUFFER\n#define FF_BUFFER_TYPE_INTERNAL 1\n#define FF_BUFFER_TYPE_USER     2 ///< direct rendering buffers (image is (de)allocated by user)\n#define FF_BUFFER_TYPE_SHARED   4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.\n#define FF_BUFFER_TYPE_COPY     8 ///< Just a (modified) 
copy of some other buffer, don't deallocate anything.\n\n#define FF_BUFFER_HINTS_VALID    0x01 // Buffer hints value is meaningful (if 0 ignore).\n#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.\n#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.\n#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).\n#endif\n\n/**\n * The decoder will keep a reference to the frame and may reuse it later.\n */\n#define AV_GET_BUFFER_FLAG_REF (1 << 0)\n\n/**\n * @defgroup lavc_packet AVPacket\n *\n * Types and functions for working with AVPacket.\n * @{\n */\nenum AVPacketSideDataType {\n    AV_PKT_DATA_PALETTE,\n    AV_PKT_DATA_NEW_EXTRADATA,\n\n    /**\n     * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:\n     * @code\n     * u32le param_flags\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)\n     *     s32le channel_count\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)\n     *     u64le channel_layout\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)\n     *     s32le sample_rate\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)\n     *     s32le width\n     *     s32le height\n     * @endcode\n     */\n    AV_PKT_DATA_PARAM_CHANGE,\n\n    /**\n     * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of\n     * structures with info about macroblocks relevant to splitting the\n     * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).\n     * That is, it does not necessarily contain info about all macroblocks,\n     * as long as the distance between macroblocks in the info is smaller\n     * than the target payload size.\n     * Each MB info structure is 12 bytes, and is laid out as follows:\n     * @code\n     * u32le bit offset from the start of the packet\n     * u8    current quantizer at the start of the macroblock\n     * u8    GOB number\n     * u16le macroblock address within the GOB\n     * u8    horizontal MV predictor\n     * u8    vertical MV predictor\n     * u8    horizontal MV predictor for block number 3\n     * u8    vertical MV predictor for block number 3\n     * @endcode\n     */\n    AV_PKT_DATA_H263_MB_INFO,\n\n    /**\n     * Recommmends skipping the specified number of samples\n     * @code\n     * u32le number of samples to skip from start of this packet\n     * u32le number of samples to skip from end of this packet\n     * u8    reason for start skip\n     * u8    reason for end   skip (0=padding silence, 1=convergence)\n     * @endcode\n     */\n    AV_PKT_DATA_SKIP_SAMPLES=70,\n\n    /**\n     * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that\n     * the packet may contain \"dual mono\" audio specific to Japanese DTV\n     * and if it is true, recommends only the selected channel to be used.\n     * @code\n     * u8    selected channels (0=mail/left, 1=sub/right, 2=both)\n     * @endcode\n     */\n    AV_PKT_DATA_JP_DUALMONO,\n\n    /**\n     * A list of zero terminated key/value strings. There is no end marker for\n     * the list, so it is required to rely on the side data size to stop.\n     */\n    AV_PKT_DATA_STRINGS_METADATA,\n\n    /**\n     * Subtitle event position\n     * @code\n     * u32le x1\n     * u32le y1\n     * u32le x2\n     * u32le y2\n     * @endcode\n     */\n    AV_PKT_DATA_SUBTITLE_POSITION,\n\n    /**\n     * Data found in BlockAdditional element of matroska container. 
There is\n     * no end marker for the data, so it is required to rely on the side data\n     * size to recognize the end. 8 byte id (as found in BlockAddId) followed\n     * by data.\n     */\n    AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,\n\n    /**\n     * The optional first identifier line of a WebVTT cue.\n     */\n    AV_PKT_DATA_WEBVTT_IDENTIFIER,\n\n    /**\n     * The optional settings (rendering instructions) that immediately\n     * follow the timestamp specifier of a WebVTT cue.\n     */\n    AV_PKT_DATA_WEBVTT_SETTINGS,\n\n    /**\n     * A list of zero terminated key/value strings. There is no end marker for\n     * the list, so it is required to rely on the side data size to stop. This\n     * side data includes updated metadata which appeared in the stream.\n     */\n    AV_PKT_DATA_METADATA_UPDATE,\n};\n\n/**\n * This structure stores compressed data. It is typically exported by demuxers\n * and then passed as input to decoders, or received as output from encoders and\n * then passed to muxers.\n *\n * For video, it should typically contain one compressed frame. For audio it may\n * contain several compressed frames.\n *\n * AVPacket is one of the few structs in FFmpeg, whose size is a part of public\n * ABI. Thus it may be allocated on stack and no new fields can be added to it\n * without libavcodec and libavformat major bump.\n *\n * The semantics of data ownership depends on the buf or destruct (deprecated)\n * fields. If either is set, the packet data is dynamically allocated and is\n * valid indefinitely until av_free_packet() is called (which in turn calls\n * av_buffer_unref()/the destruct callback to free the data). If neither is set,\n * the packet data is typically backed by some static buffer somewhere and is\n * only valid for a limited time (e.g. until the next read call when demuxing).\n *\n * The side data is always allocated with av_malloc() and is freed in\n * av_free_packet().\n */\ntypedef struct AVPacket {\n    /**\n     * A reference to the reference-counted buffer where the packet data is\n     * stored.\n     * May be NULL, then the packet data is not reference-counted.\n     */\n    AVBufferRef *buf;\n    /**\n     * Presentation timestamp in AVStream->time_base units; the time at which\n     * the decompressed packet will be presented to the user.\n     * Can be AV_NOPTS_VALUE if it is not stored in the file.\n     * pts MUST be larger or equal to dts as presentation cannot happen before\n     * decompression, unless one wants to view hex dumps. Some formats misuse\n     * the terms dts and pts/cts to mean something different. 
Such timestamps\n     * must be converted to true pts/dts before they are stored in AVPacket.\n     */\n    int64_t pts;\n    /**\n     * Decompression timestamp in AVStream->time_base units; the time at which\n     * the packet is decompressed.\n     * Can be AV_NOPTS_VALUE if it is not stored in the file.\n     */\n    int64_t dts;\n    uint8_t *data;\n    int   size;\n    int   stream_index;\n    /**\n     * A combination of AV_PKT_FLAG values\n     */\n    int   flags;\n    /**\n     * Additional packet data that can be provided by the container.\n     * Packet can contain several types of side information.\n     */\n    struct {\n        uint8_t *data;\n        int      size;\n        enum AVPacketSideDataType type;\n    } *side_data;\n    int side_data_elems;\n\n    /**\n     * Duration of this packet in AVStream->time_base units, 0 if unknown.\n     * Equals next_pts - this_pts in presentation order.\n     */\n    int   duration;\n#if FF_API_DESTRUCT_PACKET\n    attribute_deprecated\n    void  (*destruct)(struct AVPacket *);\n    attribute_deprecated\n    void  *priv;\n#endif\n    int64_t pos;                            ///< byte position in stream, -1 if unknown\n\n    /**\n     * Time difference in AVStream->time_base units from the pts of this\n     * packet to the point at which the output from the decoder has converged\n     * independent from the availability of previous frames. That is, the\n     * frames are virtually identical no matter if decoding started from\n     * the very first frame or from this keyframe.\n     * Is AV_NOPTS_VALUE if unknown.\n     * This field is not the display duration of the current packet.\n     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY\n     * set.\n     *\n     * The purpose of this field is to allow seeking in streams that have no\n     * keyframes in the conventional sense. It corresponds to the\n     * recovery point SEI in H.264 and match_time_delta in NUT. 
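As a hedged sketch of how a player\n     * might use it when seeking (the packet loop and the time_base-scaled\n     * 'target' are hypothetical):\n     * @code\n     * if ((pkt.flags & AV_PKT_FLAG_KEY) &&\n     *     pkt.convergence_duration != AV_NOPTS_VALUE &&\n     *     pkt.pts + pkt.convergence_duration <= target)\n     *     start_pts = pkt.pts; // decoding from here has converged by 'target'\n     * @endcode\n     * 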
It is also\n     * essential for some types of subtitle streams to ensure that all\n     * subtitles are correctly displayed after seeking.\n     */\n    int64_t convergence_duration;\n} AVPacket;\n#define AV_PKT_FLAG_KEY     0x0001 ///< The packet contains a keyframe\n#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted\n\nenum AVSideDataParamChangeFlags {\n    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT  = 0x0001,\n    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,\n    AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE    = 0x0004,\n    AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS     = 0x0008,\n};\n/**\n * @}\n */\n\nstruct AVCodecInternal;\n\nenum AVFieldOrder {\n    AV_FIELD_UNKNOWN,\n    AV_FIELD_PROGRESSIVE,\n    AV_FIELD_TT,          //< Top coded_first, top displayed first\n    AV_FIELD_BB,          //< Bottom coded first, bottom displayed first\n    AV_FIELD_TB,          //< Top coded first, bottom displayed first\n    AV_FIELD_BT,          //< Bottom coded first, top displayed first\n};\n\n/**\n * main external API structure.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user\n * applications.\n * sizeof(AVCodecContext) must not be used outside libav*.\n */\ntypedef struct AVCodecContext {\n    /**\n     * information on struct for av_log\n     * - set by avcodec_alloc_context3\n     */\n    const AVClass *av_class;\n    int log_level_offset;\n\n    enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */\n    const struct AVCodec  *codec;\n    char             codec_name[32];\n    enum AVCodecID     codec_id; /* see AV_CODEC_ID_xxx */\n\n    /**\n     * fourcc (LSB first, so \"ABCD\" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').\n     * This is used to work around some encoder bugs.\n     * A demuxer should set this to what is stored in the field used to identify the codec.\n     * If there are multiple such fields in a container then the demuxer should choose the one\n     * which maximizes the information about the used codec.\n     * If the codec tag field in a container is larger than 32 bits then the demuxer should\n     * remap the longer ID to 32 bits with a table or other structure. Alternatively a new\n     * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated\n     * first.\n     * - encoding: Set by user, if not then the default based on codec_id will be used.\n     * - decoding: Set by user, will be converted to uppercase by libavcodec during init.\n     */\n    unsigned int codec_tag;\n\n    /**\n     * fourcc from the AVI stream header (LSB first, so \"ABCD\" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').\n     * This is used to work around some encoder bugs.\n     * - encoding: unused\n     * - decoding: Set by user, will be converted to uppercase by libavcodec during init.\n     */\n    unsigned int stream_codec_tag;\n\n    void *priv_data;\n\n    /**\n     * Private context used for internal data.\n     *\n     * Unlike priv_data, this is not codec-specific. 
It is used in general\n     * libavcodec functions.\n     */\n    struct AVCodecInternal *internal;\n\n    /**\n     * Private data of the user, can be used to carry app specific stuff.\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    void *opaque;\n\n    /**\n     * the average bitrate\n     * - encoding: Set by user; unused for constant quantizer encoding.\n     * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.\n     */\n    int bit_rate;\n\n    /**\n     * number of bits the bitstream is allowed to diverge from the reference.\n     *           the reference can be CBR (for CBR pass1) or VBR (for pass2)\n     * - encoding: Set by user; unused for constant quantizer encoding.\n     * - decoding: unused\n     */\n    int bit_rate_tolerance;\n\n    /**\n     * Global quality for codecs which cannot change it per frame.\n     * This should be proportional to MPEG-1/2/4 qscale.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int global_quality;\n\n    /**\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int compression_level;\n#define FF_COMPRESSION_DEFAULT -1\n\n    /**\n     * CODEC_FLAG_*.\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int flags;\n\n    /**\n     * CODEC_FLAG2_*\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int flags2;\n\n    /**\n     * some codecs need / can use extradata like Huffman tables.\n     * mjpeg: Huffman tables\n     * rv10: additional flags\n     * mpeg4: global headers (they can be in the bitstream or here)\n     * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger\n     * than extradata_size to avoid problems if it is read with the bitstream reader.\n     * The bytewise contents of extradata must not depend on the architecture or CPU endianness.\n     * - encoding: Set/allocated/freed by libavcodec.\n     * - decoding: Set/allocated/freed by user.\n     */\n    uint8_t *extradata;\n    int extradata_size;\n\n    /**\n     * This is the fundamental unit of time (in seconds) in terms\n     * of which frame timestamps are represented. For fixed-fps content,\n     * timebase should be 1/framerate and timestamp increments should be\n     * identically 1.\n     * - encoding: MUST be set by user.\n     * - decoding: Set by libavcodec.\n     */\n    AVRational time_base;\n\n    /**\n     * For some codecs, the time base is closer to the field rate than the frame rate.\n     * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration\n     * if no telecine is used ...\n     *\n     * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.\n     */\n    int ticks_per_frame;\n\n    /**\n     * Codec delay.\n     *\n     * Encoding: Number of frames delay there will be from the encoder input to\n     *           the decoder output. (we assume the decoder matches the spec)\n     * Decoding: Number of frames delay in addition to what a standard decoder\n     *           as specified in the spec would produce.\n     *\n     * Video:\n     *   Number of frames the decoded output will be delayed relative to the\n     *   encoded input.\n     *\n     * Audio:\n     *   For encoding, this is the number of \"priming\" samples added to the\n     *   beginning of the stream. The decoded output will be delayed by this\n     *   many samples relative to the input to the encoder. 
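For example, a hedged\n     *   arithmetic sketch using only fields of this context:\n     *   @code\n     *   // seconds of priming a player would typically trim,\n     *   // e.g. 1024 / 48000 ~= 0.0213 s\n     *   double priming_sec = avctx->delay / (double)avctx->sample_rate;\n     *   @endcode\n     *   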
Note that this\n     *   field is purely informational and does not directly affect the pts\n     *   output by the encoder, which should always be based on the actual\n     *   presentation time, including any delay.\n     *   For decoding, this is the number of samples the decoder needs to\n     *   output before the decoder's output is valid. When seeking, you should\n     *   start decoding this many samples prior to your desired seek point.\n     *\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by libavcodec.\n     */\n    int delay;\n\n\n    /* video only */\n    /**\n     * picture width / height.\n     * - encoding: MUST be set by user.\n     * - decoding: May be set by the user before opening the decoder if known e.g.\n     *             from the container. Some decoders will require the dimensions\n     *             to be set by the caller. During decoding, the decoder may\n     *             overwrite those values as required.\n     */\n    int width, height;\n\n    /**\n     * Bitstream width / height, may be different from width/height e.g. when\n     * the decoded frame is cropped before being output or lowres is enabled.\n     * - encoding: unused\n     * - decoding: May be set by the user before opening the decoder if known\n     *             e.g. from the container. During decoding, the decoder may\n     *             overwrite those values as required.\n     */\n    int coded_width, coded_height;\n\n#if FF_API_ASPECT_EXTENDED\n#define FF_ASPECT_EXTENDED 15\n#endif\n\n    /**\n     * the number of pictures in a group of pictures, or 0 for intra_only\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int gop_size;\n\n    /**\n     * Pixel format, see AV_PIX_FMT_xxx.\n     * May be set by the demuxer if known from headers.\n     * May be overridden by the decoder if it knows better.\n     * - encoding: Set by user.\n     * - decoding: Set by user if known, overridden by libavcodec if known\n     */\n    enum AVPixelFormat pix_fmt;\n\n    /**\n     * Motion estimation algorithm used for video coding.\n     * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),\n     * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]\n     * - encoding: MUST be set by user.\n     * - decoding: unused\n     */\n    int me_method;\n\n    /**\n     * If non NULL, 'draw_horiz_band' is called by the libavcodec\n     * decoder to draw a horizontal band. It improves cache usage. Not\n     * all codecs can do that. You must check the codec capabilities\n     * beforehand.\n     * When multithreading is used, it may be called from multiple threads\n     * at the same time; threads might draw different parts of the same AVFrame,\n     * or multiple AVFrames, and there is no guarantee that slices will be drawn\n     * in order.\n     * The function is also used by hardware acceleration APIs.\n     * It is called at least once during frame decoding to pass\n     * the data needed for hardware render.\n     * In that mode instead of pixel data, AVFrame points to\n     * a structure specific to the acceleration API. 
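For plain software decoding, by contrast,\n     * a matching callback might look like this hedged sketch (the empty body\n     * marks where a caller would consume the band):\n     * @code\n     * static void my_draw_band(struct AVCodecContext *s, const AVFrame *src,\n     *                          int offset[AV_NUM_DATA_POINTERS],\n     *                          int y, int type, int height)\n     * {\n     *     // rows [y, y + height) of src are complete and may be consumed\n     * }\n     * @endcode\n     * Returning to the accelerated mode described above:\n     * 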
The application\n     * reads the structure and can change some fields to indicate progress\n     * or mark state.\n     * - encoding: unused\n     * - decoding: Set by user.\n     * @param height the height of the slice\n     * @param y the y position of the slice\n     * @param type 1->top field, 2->bottom field, 3->frame\n     * @param offset offset into the AVFrame.data from which the slice should be read\n     */\n    void (*draw_horiz_band)(struct AVCodecContext *s,\n                            const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],\n                            int y, int type, int height);\n\n    /**\n     * callback to negotiate the pixelFormat\n     * @param fmt is the list of formats which are supported by the codec,\n     * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality.\n     * The first is always the native one.\n     * @return the chosen format\n     * - encoding: unused\n     * - decoding: Set by user, if not set the native format will be chosen.\n     */\n    enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);\n\n    /**\n     * maximum number of B-frames between non-B-frames\n     * Note: The output will be delayed by max_b_frames+1 relative to the input.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int max_b_frames;\n\n    /**\n     * qscale factor between IP and B-frames\n     * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).\n     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float b_quant_factor;\n\n    /** obsolete FIXME remove */\n    int rc_strategy;\n#define FF_RC_STRATEGY_XVID 1\n\n    int b_frame_strategy;\n\n    /**\n     * qscale offset between IP and B-frames\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float b_quant_offset;\n\n    /**\n     * Size of the frame reordering buffer in the decoder.\n     * For MPEG-2 it is 1 IPB or 0 low delay IP.\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by libavcodec.\n     */\n    int has_b_frames;\n\n    /**\n     * 0-> h263 quant 1-> mpeg quant\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mpeg_quant;\n\n    /**\n     * qscale factor between P and I-frames\n     * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).\n     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float i_quant_factor;\n\n    /**\n     * qscale offset between P and I-frames\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float i_quant_offset;\n\n    /**\n     * luminance masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float lumi_masking;\n\n    /**\n     * temporary complexity masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float temporal_cplx_masking;\n\n    /**\n     * spatial complexity masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float spatial_cplx_masking;\n\n    /**\n     * p block masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float p_masking;\n\n    /**\n     * darkness masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    
float dark_masking;\n\n    /**\n     * slice count\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by user (or 0).\n     */\n    int slice_count;\n    /**\n     * prediction method (needed for huffyuv)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n     int prediction_method;\n#define FF_PRED_LEFT   0\n#define FF_PRED_PLANE  1\n#define FF_PRED_MEDIAN 2\n\n    /**\n     * slice offsets in the frame in bytes\n     * - encoding: Set/allocated by libavcodec.\n     * - decoding: Set/allocated by user (or NULL).\n     */\n    int *slice_offset;\n\n    /**\n     * sample aspect ratio (0 if unknown)\n     * That is the width of a pixel divided by the height of the pixel.\n     * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    AVRational sample_aspect_ratio;\n\n    /**\n     * motion estimation comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_cmp;\n    /**\n     * subpixel motion estimation comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_sub_cmp;\n    /**\n     * macroblock comparison function (not supported yet)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mb_cmp;\n    /**\n     * interlaced DCT comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int ildct_cmp;\n#define FF_CMP_SAD    0\n#define FF_CMP_SSE    1\n#define FF_CMP_SATD   2\n#define FF_CMP_DCT    3\n#define FF_CMP_PSNR   4\n#define FF_CMP_BIT    5\n#define FF_CMP_RD     6\n#define FF_CMP_ZERO   7\n#define FF_CMP_VSAD   8\n#define FF_CMP_VSSE   9\n#define FF_CMP_NSSE   10\n#define FF_CMP_W53    11\n#define FF_CMP_W97    12\n#define FF_CMP_DCTMAX 13\n#define FF_CMP_DCT264 14\n#define FF_CMP_CHROMA 256\n\n    /**\n     * ME diamond size & shape\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int dia_size;\n\n    /**\n     * amount of previous MV predictors (2a+1 x 2a+1 square)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int last_predictor_count;\n\n    /**\n     * prepass for motion estimation\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int pre_me;\n\n    /**\n     * motion estimation prepass comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_pre_cmp;\n\n    /**\n     * ME prepass diamond size & shape\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int pre_dia_size;\n\n    /**\n     * subpel ME quality\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_subpel_quality;\n\n    /**\n     * DTG active format information (additional aspect ratio\n     * information only used in DVB MPEG-2 transport streams)\n     * 0 if not set.\n     *\n     * - encoding: unused\n     * - decoding: Set by decoder.\n     */\n    int dtg_active_format;\n#define FF_DTG_AFD_SAME         8\n#define FF_DTG_AFD_4_3          9\n#define FF_DTG_AFD_16_9         10\n#define FF_DTG_AFD_14_9         11\n#define FF_DTG_AFD_4_3_SP_14_9  13\n#define FF_DTG_AFD_16_9_SP_14_9 14\n#define FF_DTG_AFD_SP_4_3       15\n\n    /**\n     * maximum motion estimation search range in subpel units\n     * If 0 then no limit.\n     *\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_range;\n\n    /**\n     * intra quantizer 
bias\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int intra_quant_bias;\n#define FF_DEFAULT_QUANT_BIAS 999999\n\n    /**\n     * inter quantizer bias\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int inter_quant_bias;\n\n    /**\n     * slice flags\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int slice_flags;\n#define SLICE_FLAG_CODED_ORDER    0x0001 ///< draw_horiz_band() is called in coded order instead of display\n#define SLICE_FLAG_ALLOW_FIELD    0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)\n#define SLICE_FLAG_ALLOW_PLANE    0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)\n\n#if FF_API_XVMC\n    /**\n     * XVideo Motion Acceleration\n     * - encoding: forbidden\n     * - decoding: set by decoder\n     * @deprecated XvMC doesn't need it anymore.\n     */\n    attribute_deprecated int xvmc_acceleration;\n#endif /* FF_API_XVMC */\n\n    /**\n     * macroblock decision mode\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mb_decision;\n#define FF_MB_DECISION_SIMPLE 0        ///< uses mb_cmp\n#define FF_MB_DECISION_BITS   1        ///< chooses the one which needs the fewest bits\n#define FF_MB_DECISION_RD     2        ///< rate distortion\n\n    /**\n     * custom intra quantization matrix\n     * - encoding: Set by user, can be NULL.\n     * - decoding: Set by libavcodec.\n     */\n    uint16_t *intra_matrix;\n\n    /**\n     * custom inter quantization matrix\n     * - encoding: Set by user, can be NULL.\n     * - decoding: Set by libavcodec.\n     */\n    uint16_t *inter_matrix;\n\n    /**\n     * scene change detection threshold\n     * 0 is default, larger means fewer detected scene changes.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int scenechange_threshold;\n\n    /**\n     * noise reduction strength\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int noise_reduction;\n\n    /**\n     * Motion estimation threshold below which no motion estimation is\n     * performed, but instead the user-specified motion vectors are used.\n     *\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_threshold;\n\n    /**\n     * Macroblock threshold below which the user-specified macroblock types will be used.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mb_threshold;\n\n    /**\n     * precision of the intra DC coefficient - 8\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int intra_dc_precision;\n\n    /**\n     * Number of macroblock rows at the top which are skipped.\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int skip_top;\n\n    /**\n     * Number of macroblock rows at the bottom which are skipped.\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int skip_bottom;\n\n    /**\n     * Border processing masking, raises the quantizer for MBs on the borders\n     * of the picture.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float border_masking;\n\n    /**\n     * minimum MB Lagrange multiplier\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mb_lmin;\n\n    /**\n     * maximum MB Lagrange multiplier\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mb_lmax;\n\n    /**\n     *\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int 
me_penalty_compensation;\n\n    /**\n     *\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int bidir_refine;\n\n    /**\n     *\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int brd_scale;\n\n    /**\n     * minimum GOP size\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int keyint_min;\n\n    /**\n     * number of reference frames\n     * - encoding: Set by user.\n     * - decoding: Set by lavc.\n     */\n    int refs;\n\n    /**\n     * chroma qp offset from luma\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int chromaoffset;\n\n    /**\n     * Multiplied by qscale for each frame and added to scene_change_score.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int scenechange_factor;\n\n    /**\n     *\n     * Note: Value depends upon the compare function used for fullpel ME.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mv0_threshold;\n\n    /**\n     * Adjust sensitivity of b_frame_strategy 1.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int b_sensitivity;\n\n    /**\n     * Chromaticity coordinates of the source primaries.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorPrimaries color_primaries;\n\n    /**\n     * Color Transfer Characteristic.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorTransferCharacteristic color_trc;\n\n    /**\n     * YUV colorspace type.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorSpace colorspace;\n\n    /**\n     * MPEG vs JPEG YUV range.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorRange color_range;\n\n    /**\n     * This defines the location of chroma samples.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVChromaLocation chroma_sample_location;\n\n    /**\n     * Number of slices.\n     * Indicates number of picture subdivisions. Used for parallelized\n     * decoding.\n     * - encoding: Set by user\n     * - decoding: unused\n     */\n    int slices;\n\n    /** Field order\n     * - encoding: set by libavcodec\n     * - decoding: Set by user.\n     */\n    enum AVFieldOrder field_order;\n\n    /* audio only */\n    int sample_rate; ///< samples per second\n    int channels;    ///< number of audio channels\n\n    /**\n     * audio sample format\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    enum AVSampleFormat sample_fmt;  ///< sample format\n\n    /* The following data should not be initialized. */\n    /**\n     * Number of samples per channel in an audio frame.\n     *\n     * - encoding: set by libavcodec in avcodec_open2(). 
Each submitted frame\n     *   except the last must contain exactly frame_size samples per channel.\n     *   May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the\n     *   frame size is not restricted.\n     * - decoding: may be set by some decoders to indicate constant frame size\n     */\n    int frame_size;\n\n    /**\n     * Frame counter, set by libavcodec.\n     *\n     * - decoding: total number of frames returned from the decoder so far.\n     * - encoding: total number of frames passed to the encoder so far.\n     *\n     *   @note the counter is not incremented if encoding/decoding resulted in\n     *   an error.\n     */\n    int frame_number;\n\n    /**\n     * number of bytes per packet if constant and known or 0\n     * Used by some WAV based audio codecs.\n     */\n    int block_align;\n\n    /**\n     * Audio cutoff bandwidth (0 means \"automatic\")\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int cutoff;\n\n#if FF_API_REQUEST_CHANNELS\n    /**\n     * Decoder should decode to this many channels if it can (0 for default)\n     * - encoding: unused\n     * - decoding: Set by user.\n     * @deprecated Deprecated in favor of request_channel_layout.\n     */\n    attribute_deprecated int request_channels;\n#endif\n\n    /**\n     * Audio channel layout.\n     * - encoding: set by user.\n     * - decoding: set by user, may be overwritten by libavcodec.\n     */\n    uint64_t channel_layout;\n\n    /**\n     * Request decoder to use this channel layout if it can (0 for default)\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    uint64_t request_channel_layout;\n\n    /**\n     * Type of service that the audio stream conveys.\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    enum AVAudioServiceType audio_service_type;\n\n    /**\n     * desired sample format\n     * - encoding: Not used.\n     * - decoding: Set by user.\n     * Decoder will decode to this format if it can.\n     */\n    enum AVSampleFormat request_sample_fmt;\n\n#if FF_API_GET_BUFFER\n    /**\n     * Called at the beginning of each frame to get a buffer for it.\n     *\n     * The function will set AVFrame.data[], AVFrame.linesize[].\n     * AVFrame.extended_data[] must also be set, but it should be the same as\n     * AVFrame.data[] except for planar audio with more channels than can fit\n     * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as\n     * many data pointers as it can hold.\n     *\n     * if CODEC_CAP_DR1 is not set then get_buffer() must call\n     * avcodec_default_get_buffer() instead of providing buffers allocated by\n     * some other means.\n     *\n     * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't\n     * need it. avcodec_default_get_buffer() aligns the output buffer properly,\n     * but if get_buffer() is overridden then alignment considerations should\n     * be taken into account.\n     *\n     * @see avcodec_default_get_buffer()\n     *\n     * Video:\n     *\n     * If pic.reference is set then the frame will be read later by libavcodec.\n     * avcodec_align_dimensions2() should be used to find the required width and\n     * height, as they normally need to be rounded up to the next multiple of 16.\n     *\n     * If frame multithreading is used and thread_safe_callbacks is set,\n     * it may be called from a different thread, but not from more than one at\n     * once. 
Does not need to be reentrant.\n     *\n     * @see release_buffer(), reget_buffer()\n     * @see avcodec_align_dimensions2()\n     *\n     * Audio:\n     *\n     * Decoders request a buffer of a particular size by setting\n     * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,\n     * however, utilize only part of the buffer by setting AVFrame.nb_samples\n     * to a smaller value in the output frame.\n     *\n     * Decoders cannot use the buffer after returning from\n     * avcodec_decode_audio4(), so they will not call release_buffer(), as it\n     * is assumed to be released immediately upon return. In some rare cases,\n     * a decoder may need to call get_buffer() more than once in a single\n     * call to avcodec_decode_audio4(). In that case, when get_buffer() is\n     * called again after it has already been called once, the previously\n     * acquired buffer is assumed to be released at that time and may not be\n     * reused by the decoder.\n     *\n     * As a convenience, av_samples_get_buffer_size() and\n     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()\n     * functions to find the required data size and to fill data pointers and\n     * linesize. In AVFrame.linesize, only linesize[0] may be set for audio\n     * since all planes must be the same size.\n     *\n     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     *\n     * @deprecated use get_buffer2()\n     */\n    attribute_deprecated\n    int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);\n\n    /**\n     * Called to release buffers which were allocated with get_buffer.\n     * A released buffer can be reused in get_buffer().\n     * pic.data[*] must be set to NULL.\n     * May be called from a different thread if frame multithreading is used,\n     * but not by more than one thread at once, so does not need to be reentrant.\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     *\n     * @deprecated custom freeing callbacks should be set from get_buffer2()\n     */\n    attribute_deprecated\n    void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);\n\n    /**\n     * Called at the beginning of a frame to get cr buffer for it.\n     * Buffer type (size, hints) must be the same. libavcodec won't check it.\n     * libavcodec will pass previous buffer in pic, function should return\n     * same buffer or new buffer with old frame \"painted\" into it.\n     * If pic.data[0] == NULL must behave like get_buffer().\n     * if CODEC_CAP_DR1 is not set then reget_buffer() must call\n     * avcodec_default_reget_buffer() instead of providing buffers allocated by\n     * some other means.\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     */\n    attribute_deprecated\n    int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);\n#endif\n\n    /**\n     * This callback is called at the beginning of each frame to get data\n     * buffer(s) for it. There may be one contiguous buffer for all the data or\n     * there may be a buffer per each data plane or anything in between. 
What\n     * this means is that you may set however many entries in buf[] you feel necessary.\n     * Each buffer must be reference-counted using the AVBuffer API (see description\n     * of buf[] below).\n     *\n     * The following fields will be set in the frame before this callback is\n     * called:\n     * - format\n     * - width, height (video only)\n     * - sample_rate, channel_layout, nb_samples (audio only)\n     * Their values may differ from the corresponding values in\n     * AVCodecContext. This callback must use the frame values, not the codec\n     * context values, to calculate the required buffer size.\n     *\n     * This callback must fill the following fields in the frame:\n     * - data[]\n     * - linesize[]\n     * - extended_data:\n     *   * if the data is planar audio with more than 8 channels, then this\n     *     callback must allocate and fill extended_data to contain all pointers\n     *     to all data planes. data[] must hold as many pointers as it can.\n     *     extended_data must be allocated with av_malloc() and will be freed in\n     *     av_frame_unref().\n     *   * otherwise extended_data must point to data\n     * - buf[] must contain one or more pointers to AVBufferRef structures. Each of\n     *   the frame's data and extended_data pointers must be contained in these. That\n     *   is, one AVBufferRef for each allocated chunk of memory, not necessarily one\n     *   AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),\n     *   and av_buffer_ref().\n     * - extended_buf and nb_extended_buf must be allocated with av_malloc() by\n     *   this callback and filled with the extra buffers if there are more\n     *   buffers than buf[] can hold. extended_buf will be freed in\n     *   av_frame_unref().\n     *\n     * If CODEC_CAP_DR1 is not set then get_buffer2() must call\n     * avcodec_default_get_buffer2() instead of providing buffers allocated by\n     * some other means.\n     *\n     * Each data plane must be aligned to the maximum required by the target\n     * CPU.\n     *\n     * @see avcodec_default_get_buffer2()\n     *\n     * Video:\n     *\n     * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused\n     * (read and/or written to if it is writable) later by libavcodec.\n     *\n     * avcodec_align_dimensions2() should be used to find the required width and\n     * height, as they normally need to be rounded up to the next multiple of 16.\n     *\n     * Some decoders do not support linesizes changing between frames.\n     *\n     * If frame multithreading is used and thread_safe_callbacks is set,\n     * this callback may be called from a different thread, but not from more\n     * than one at once. Does not need to be reentrant.\n     *\n     * @see avcodec_align_dimensions2()\n     *\n     * Audio:\n     *\n     * Decoders request a buffer of a particular size by setting\n     * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,\n     * however, utilize only part of the buffer by setting AVFrame.nb_samples\n     * to a smaller value in the output frame.\n     *\n     * As a convenience, av_samples_get_buffer_size() and\n     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()\n     * functions to find the required data size and to fill data pointers and\n     * linesize. 
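A minimal custom callback can simply delegate to the default allocator,\n     * as in this hedged sketch:\n     * @code\n     * static int my_get_buffer2(struct AVCodecContext *s, AVFrame *frame,\n     *                           int flags)\n     * {\n     *     // frame->format, frame->width/height or frame->nb_samples are\n     *     // already set here and may be inspected before allocating\n     *     return avcodec_default_get_buffer2(s, frame, flags);\n     * }\n     * @endcode\n     * 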
In AVFrame.linesize, only linesize[0] may be set for audio\n     * since all planes must be the same size.\n     *\n     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     */\n    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);\n\n    /**\n     * If non-zero, the decoded audio and video frames returned from\n     * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted\n     * and are valid indefinitely. The caller must free them with\n     * av_frame_unref() when they are not needed anymore.\n     * Otherwise, the decoded frames must not be freed by the caller and are\n     * only valid until the next decode call.\n     *\n     * - encoding: unused\n     * - decoding: set by the caller before avcodec_open2().\n     */\n    int refcounted_frames;\n\n    /* - encoding parameters */\n    float qcompress;  ///< amount of qscale change between easy & hard scenes (0.0-1.0)\n    float qblur;      ///< amount of qscale smoothing over time (0.0-1.0)\n\n    /**\n     * minimum quantizer\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int qmin;\n\n    /**\n     * maximum quantizer\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int qmax;\n\n    /**\n     * maximum quantizer difference between frames\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int max_qdiff;\n\n    /**\n     * ratecontrol qmin qmax limiting method\n     * 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float rc_qsquish;\n\n    float rc_qmod_amp;\n    int rc_qmod_freq;\n\n    /**\n     * decoder bitstream buffer size\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int rc_buffer_size;\n\n    /**\n     * ratecontrol override, see RcOverride\n     * - encoding: Allocated/set/freed by user.\n     * - decoding: unused\n     */\n    int rc_override_count;\n    RcOverride *rc_override;\n\n    /**\n     * rate control equation\n     * - encoding: Set by user\n     * - decoding: unused\n     */\n    const char *rc_eq;\n\n    /**\n     * maximum bitrate\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int rc_max_rate;\n\n    /**\n     * minimum bitrate\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int rc_min_rate;\n\n    float rc_buffer_aggressivity;\n\n    /**\n     * initial complexity for pass1 ratecontrol\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float rc_initial_cplx;\n\n    /**\n     * Ratecontrol attempts to use, at maximum, <value> of what can be used without an underflow.\n     * - encoding: Set by user.\n     * - decoding: unused.\n     */\n    float rc_max_available_vbv_use;\n\n    /**\n     * Ratecontrol attempts to use, at least, <value> times the amount needed to prevent a vbv overflow.\n     * - encoding: Set by user.\n     * - decoding: unused.\n     */\n    float rc_min_vbv_overflow_use;\n\n    /**\n     * Number of bits which should be loaded into the rc buffer before decoding starts.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int rc_initial_buffer_occupancy;\n\n#define FF_CODER_TYPE_VLC       0\n#define FF_CODER_TYPE_AC        1\n#define FF_CODER_TYPE_RAW       2\n#define FF_CODER_TYPE_RLE       3\n#define FF_CODER_TYPE_DEFLATE   4\n    /**\n     * 
coder type\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int coder_type;\n\n    /**\n     * context model\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int context_model;\n\n    /**\n     * minimum Lagrange multiplier\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int lmin;\n\n    /**\n     * maximum Lagrange multiplier\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int lmax;\n\n    /**\n     * frame skip threshold\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_threshold;\n\n    /**\n     * frame skip factor\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_factor;\n\n    /**\n     * frame skip exponent\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_exp;\n\n    /**\n     * frame skip comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_cmp;\n\n    /**\n     * trellis RD quantization\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int trellis;\n\n    /**\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int min_prediction_order;\n\n    /**\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int max_prediction_order;\n\n    /**\n     * GOP timecode frame start number\n     * - encoding: Set by user, in non-drop-frame format\n     * - decoding: Set by libavcodec (timecode in the 25-bit format, -1 if unset)\n     */\n    int64_t timecode_frame_start;\n\n    /* The RTP callback: This function is called    */\n    /* every time the encoder has a packet to send. */\n    /* It depends on the encoder if the data starts */\n    /* with a Start Code (it should). H.263 does.   */\n    /* mb_nb contains the number of macroblocks     */\n    /* encoded in the RTP payload.                  */\n    void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);\n\n    int rtp_payload_size;   /* The size of the RTP payload: the coder will  */\n                            /* do its best to deliver a chunk with size     */\n                            /* below rtp_payload_size, the chunk will start */\n                            /* with a start code on some codecs like H.263. */\n                            /* This doesn't take account of any particular  */\n                            /* headers inside the transmitted RTP payload.  
*/\n\n    /* statistics, used for 2-pass encoding */\n    int mv_bits;\n    int header_bits;\n    int i_tex_bits;\n    int p_tex_bits;\n    int i_count;\n    int p_count;\n    int skip_count;\n    int misc_bits;\n\n    /**\n     * number of bits used for the previously encoded frame\n     * - encoding: Set by libavcodec.\n     * - decoding: unused\n     */\n    int frame_bits;\n\n    /**\n     * pass1 encoding statistics output buffer\n     * - encoding: Set by libavcodec.\n     * - decoding: unused\n     */\n    char *stats_out;\n\n    /**\n     * pass2 encoding statistics input buffer\n     * Concatenated stuff from stats_out of pass1 should be placed here.\n     * - encoding: Allocated/set/freed by user.\n     * - decoding: unused\n     */\n    char *stats_in;\n\n    /**\n     * Work around bugs in encoders which sometimes cannot be detected automatically.\n     * - encoding: Set by user\n     * - decoding: Set by user\n     */\n    int workaround_bugs;\n#define FF_BUG_AUTODETECT       1  ///< autodetection\n#if FF_API_OLD_MSMPEG4\n#define FF_BUG_OLD_MSMPEG4      2\n#endif\n#define FF_BUG_XVID_ILACE       4\n#define FF_BUG_UMP4             8\n#define FF_BUG_NO_PADDING       16\n#define FF_BUG_AMV              32\n#if FF_API_AC_VLC\n#define FF_BUG_AC_VLC           0  ///< Will be removed, libavcodec can now handle these non-compliant files by default.\n#endif\n#define FF_BUG_QPEL_CHROMA      64\n#define FF_BUG_STD_QPEL         128\n#define FF_BUG_QPEL_CHROMA2     256\n#define FF_BUG_DIRECT_BLOCKSIZE 512\n#define FF_BUG_EDGE             1024\n#define FF_BUG_HPEL_CHROMA      2048\n#define FF_BUG_DC_CLIP          4096\n#define FF_BUG_MS               8192 ///< Work around various bugs in Microsoft's broken decoders.\n#define FF_BUG_TRUNCATED       16384\n\n    /**\n     * strictly follow the standard (MPEG4, ...).\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     * Setting this to STRICT or higher means the encoder and decoder will\n     * generally do stupid things, whereas setting it to unofficial or lower\n     * will mean the encoder might produce output that is not supported by all\n     * spec-compliant decoders. 
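For instance, opening an encoder whose\n     * capabilities include CODEC_CAP_EXPERIMENTAL requires lowering this\n     * field first (hedged sketch):\n     * @code\n     * avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;\n     * @endcode\n     * 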
Decoders don't differentiate between normal,\n     * unofficial and experimental (that is, they always try to decode things\n     * when they can) unless they are explicitly asked to behave stupidly\n     * (=strictly conform to the specs)\n     */\n    int strict_std_compliance;\n#define FF_COMPLIANCE_VERY_STRICT   2 ///< Strictly conform to an older more strict version of the spec or reference software.\n#define FF_COMPLIANCE_STRICT        1 ///< Strictly conform to all the things in the spec no matter what consequences.\n#define FF_COMPLIANCE_NORMAL        0\n#define FF_COMPLIANCE_UNOFFICIAL   -1 ///< Allow unofficial extensions\n#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.\n\n    /**\n     * error concealment flags\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int error_concealment;\n#define FF_EC_GUESS_MVS   1\n#define FF_EC_DEBLOCK     2\n\n    /**\n     * debug\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int debug;\n#define FF_DEBUG_PICT_INFO   1\n#define FF_DEBUG_RC          2\n#define FF_DEBUG_BITSTREAM   4\n#define FF_DEBUG_MB_TYPE     8\n#define FF_DEBUG_QP          16\n#if FF_API_DEBUG_MV\n/**\n * @deprecated this option does nothing\n */\n#define FF_DEBUG_MV          32\n#endif\n#define FF_DEBUG_DCT_COEFF   0x00000040\n#define FF_DEBUG_SKIP        0x00000080\n#define FF_DEBUG_STARTCODE   0x00000100\n#define FF_DEBUG_PTS         0x00000200\n#define FF_DEBUG_ER          0x00000400\n#define FF_DEBUG_MMCO        0x00000800\n#define FF_DEBUG_BUGS        0x00001000\n#if FF_API_DEBUG_MV\n#define FF_DEBUG_VIS_QP      0x00002000 ///< only access through AVOptions from outside libavcodec\n#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec\n#endif\n#define FF_DEBUG_BUFFERS     0x00008000\n#define FF_DEBUG_THREADS     0x00010000\n\n#if FF_API_DEBUG_MV\n    /**\n     * debug\n     * Code outside libavcodec should access this field using AVOptions\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int debug_mv;\n#define FF_DEBUG_VIS_MV_P_FOR  0x00000001 //visualize forward predicted MVs of P frames\n#define FF_DEBUG_VIS_MV_B_FOR  0x00000002 //visualize forward predicted MVs of B frames\n#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames\n#endif\n\n    /**\n     * Error recognition; may misdetect some more or less valid parts as errors.\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int err_recognition;\n\n/**\n * Verify checksums embedded in the bitstream (could be of either encoded or\n * decoded data, depending on the codec) and print an error message on mismatch.\n * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the\n * decoder returning an error.\n */\n#define AV_EF_CRCCHECK  (1<<0)\n#define AV_EF_BITSTREAM (1<<1)          ///< detect bitstream specification deviations\n#define AV_EF_BUFFER    (1<<2)          ///< detect improper bitstream length\n#define AV_EF_EXPLODE   (1<<3)          ///< abort decoding on minor error detection\n\n#define AV_EF_CAREFUL    (1<<16)        ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors\n#define AV_EF_COMPLIANT  (1<<17)        ///< consider all spec non compliancies as errors\n#define AV_EF_AGGRESSIVE (1<<18)        ///< consider things that a sane encoder should not do as an error\n\n\n    /**\n     * opaque 64bit number 
(generally a PTS) that will be reordered and\n     * output in AVFrame.reordered_opaque\n     * @deprecated in favor of pkt_pts\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int64_t reordered_opaque;\n\n    /**\n     * Hardware accelerator in use\n     * - encoding: unused.\n     * - decoding: Set by libavcodec\n     */\n    struct AVHWAccel *hwaccel;\n\n    /**\n     * Hardware accelerator context.\n     * For some hardware accelerators, a global context needs to be\n     * provided by the user. In that case, this holds display-dependent\n     * data FFmpeg cannot instantiate itself. Please refer to the\n     * FFmpeg HW accelerator documentation to know how to fill this\n     * field; e.g. for VA API, this is a struct vaapi_context.\n     * - encoding: unused\n     * - decoding: Set by user\n     */\n    void *hwaccel_context;\n\n    /**\n     * error\n     * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.\n     * - decoding: unused\n     */\n    uint64_t error[AV_NUM_DATA_POINTERS];\n\n    /**\n     * DCT algorithm, see FF_DCT_* below\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int dct_algo;\n#define FF_DCT_AUTO    0\n#define FF_DCT_FASTINT 1\n#define FF_DCT_INT     2\n#define FF_DCT_MMX     3\n#define FF_DCT_ALTIVEC 5\n#define FF_DCT_FAAN    6\n\n    /**\n     * IDCT algorithm, see FF_IDCT_* below.\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int idct_algo;\n#define FF_IDCT_AUTO          0\n#define FF_IDCT_INT           1\n#define FF_IDCT_SIMPLE        2\n#define FF_IDCT_SIMPLEMMX     3\n#define FF_IDCT_ARM           7\n#define FF_IDCT_ALTIVEC       8\n#define FF_IDCT_SH4           9\n#define FF_IDCT_SIMPLEARM     10\n#define FF_IDCT_IPP           13\n#define FF_IDCT_XVIDMMX       14\n#define FF_IDCT_SIMPLEARMV5TE 16\n#define FF_IDCT_SIMPLEARMV6   17\n#define FF_IDCT_SIMPLEVIS     18\n#define FF_IDCT_FAAN          20\n#define FF_IDCT_SIMPLENEON    22\n#if FF_API_ARCH_ALPHA\n#define FF_IDCT_SIMPLEALPHA   23\n#endif\n\n    /**\n     * bits per sample/pixel from the demuxer (needed for huffyuv).\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by user.\n     */\n     int bits_per_coded_sample;\n\n    /**\n     * Bits per sample/pixel of internal libavcodec pixel/sample format.\n     * - encoding: set by user.\n     * - decoding: set by libavcodec.\n     */\n    int bits_per_raw_sample;\n\n#if FF_API_LOWRES\n    /**\n     * low resolution decoding, 1-> 1/2 size, 2->1/4 size\n     * - encoding: unused\n     * - decoding: Set by user.\n     * Code outside libavcodec should access this field using:\n     * av_codec_{get,set}_lowres(avctx)\n     */\n     int lowres;\n#endif\n\n    /**\n     * the picture in the bitstream\n     * - encoding: Set by libavcodec.\n     * - decoding: unused\n     */\n    AVFrame *coded_frame;\n\n    /**\n     * thread count\n     * is used to decide how many independent tasks should be passed to execute()\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int thread_count;\n\n    /**\n     * Which multithreading methods to use.\n     * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,\n     * so clients which cannot provide future frames should not use it.\n     *\n     * - encoding: Set by user, otherwise the default is used.\n     * - decoding: Set by user, otherwise the default is used.\n     */\n    int thread_type;\n#define FF_THREAD_FRAME   1 ///< Decode more than one frame at once\n#define 
FF_THREAD_SLICE   2 ///< Decode more than one part of a single frame at once\n\n    /**\n     * Which multithreading methods are in use by the codec.\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by libavcodec.\n     */\n    int active_thread_type;\n\n    /**\n     * Set by the client if its custom get_buffer() callback can be called\n     * synchronously from another thread, which allows faster multithreaded decoding.\n     * draw_horiz_band() will be called from other threads regardless of this setting.\n     * Ignored if the default get_buffer() is used.\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int thread_safe_callbacks;\n\n    /**\n     * The codec may call this to execute several independent things.\n     * It will return only after finishing all tasks.\n     * The user may replace this with some multithreaded implementation,\n     * the default implementation will execute the parts serially.\n     * @param count the number of things to execute\n     * - encoding: Set by libavcodec, user can override.\n     * - decoding: Set by libavcodec, user can override.\n     */\n    int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);\n\n    /**\n     * The codec may call this to execute several independent things.\n     * It will return only after finishing all tasks.\n     * The user may replace this with some multithreaded implementation,\n     * the default implementation will execute the parts serially.\n     * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.\n     * @param c context passed also to func\n     * @param count the number of things to execute\n     * @param arg2 argument passed unchanged to func\n     * @param ret return values of executed functions, must have space for \"count\" values. May be NULL.\n     * @param func function that will be called count times, with jobnr from 0 to count-1.\n     *             threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no\n     *             two instances of func executing at the same time will have the same threadnr.\n     * @return always 0 currently, but code should handle a future improvement where when any call to func\n     *         returns < 0 no further calls to func may be done and < 0 is returned.\n     * - encoding: Set by libavcodec, user can override.\n     * - decoding: Set by libavcodec, user can override.\n     */\n    int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);\n\n#if FF_API_THREAD_OPAQUE\n    /**\n     * @deprecated this field should not be used from outside of lavc\n     */\n    attribute_deprecated\n    void *thread_opaque;\n#endif\n\n    /**\n     * noise vs. 
sse weight for the nsse comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n     int nsse_weight;\n\n    /**\n     * profile\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n     int profile;\n#define FF_PROFILE_UNKNOWN -99\n#define FF_PROFILE_RESERVED -100\n\n#define FF_PROFILE_AAC_MAIN 0\n#define FF_PROFILE_AAC_LOW  1\n#define FF_PROFILE_AAC_SSR  2\n#define FF_PROFILE_AAC_LTP  3\n#define FF_PROFILE_AAC_HE   4\n#define FF_PROFILE_AAC_HE_V2 28\n#define FF_PROFILE_AAC_LD   22\n#define FF_PROFILE_AAC_ELD  38\n#define FF_PROFILE_MPEG2_AAC_LOW 128\n#define FF_PROFILE_MPEG2_AAC_HE  131\n\n#define FF_PROFILE_DTS         20\n#define FF_PROFILE_DTS_ES      30\n#define FF_PROFILE_DTS_96_24   40\n#define FF_PROFILE_DTS_HD_HRA  50\n#define FF_PROFILE_DTS_HD_MA   60\n\n#define FF_PROFILE_MPEG2_422    0\n#define FF_PROFILE_MPEG2_HIGH   1\n#define FF_PROFILE_MPEG2_SS     2\n#define FF_PROFILE_MPEG2_SNR_SCALABLE  3\n#define FF_PROFILE_MPEG2_MAIN   4\n#define FF_PROFILE_MPEG2_SIMPLE 5\n\n#define FF_PROFILE_H264_CONSTRAINED  (1<<9)  // 8+1; constraint_set1_flag\n#define FF_PROFILE_H264_INTRA        (1<<11) // 8+3; constraint_set3_flag\n\n#define FF_PROFILE_H264_BASELINE             66\n#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)\n#define FF_PROFILE_H264_MAIN                 77\n#define FF_PROFILE_H264_EXTENDED             88\n#define FF_PROFILE_H264_HIGH                 100\n#define FF_PROFILE_H264_HIGH_10              110\n#define FF_PROFILE_H264_HIGH_10_INTRA        (110|FF_PROFILE_H264_INTRA)\n#define FF_PROFILE_H264_HIGH_422             122\n#define FF_PROFILE_H264_HIGH_422_INTRA       (122|FF_PROFILE_H264_INTRA)\n#define FF_PROFILE_H264_HIGH_444             144\n#define FF_PROFILE_H264_HIGH_444_PREDICTIVE  244\n#define FF_PROFILE_H264_HIGH_444_INTRA       (244|FF_PROFILE_H264_INTRA)\n#define FF_PROFILE_H264_CAVLC_444            44\n\n#define FF_PROFILE_VC1_SIMPLE   0\n#define FF_PROFILE_VC1_MAIN     1\n#define FF_PROFILE_VC1_COMPLEX  2\n#define FF_PROFILE_VC1_ADVANCED 3\n\n#define FF_PROFILE_MPEG4_SIMPLE                     0\n#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE            1\n#define FF_PROFILE_MPEG4_CORE                       2\n#define FF_PROFILE_MPEG4_MAIN                       3\n#define FF_PROFILE_MPEG4_N_BIT                      4\n#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE           5\n#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION      6\n#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE     7\n#define FF_PROFILE_MPEG4_HYBRID                     8\n#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME         9\n#define FF_PROFILE_MPEG4_CORE_SCALABLE             10\n#define FF_PROFILE_MPEG4_ADVANCED_CODING           11\n#define FF_PROFILE_MPEG4_ADVANCED_CORE             12\n#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13\n#define FF_PROFILE_MPEG4_SIMPLE_STUDIO             14\n#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE           15\n\n#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0   0\n#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1   1\n#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION  2\n#define FF_PROFILE_JPEG2000_DCINEMA_2K              3\n#define FF_PROFILE_JPEG2000_DCINEMA_4K              4\n\n\n#define FF_PROFILE_HEVC_MAIN                        1\n#define FF_PROFILE_HEVC_MAIN_10                     2\n#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE          3\n\n    /**\n     * level\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n     int 
level;\n#define FF_LEVEL_UNKNOWN -99\n\n    /**\n     * Skip loop filtering for selected frames.\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    enum AVDiscard skip_loop_filter;\n\n    /**\n     * Skip IDCT/dequantization for selected frames.\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    enum AVDiscard skip_idct;\n\n    /**\n     * Skip decoding for selected frames.\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    enum AVDiscard skip_frame;\n\n    /**\n     * Header containing style information for text subtitles.\n     * For SUBTITLE_ASS subtitle type, it should contain the whole ASS\n     * [Script Info] and [V4+ Styles] section, plus the [Events] line and\n     * the Format line following. It shouldn't include any Dialogue line.\n     * - encoding: Set/allocated/freed by user (before avcodec_open2())\n     * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())\n     */\n    uint8_t *subtitle_header;\n    int subtitle_header_size;\n\n#if FF_API_ERROR_RATE\n    /**\n     * @deprecated use the 'error_rate' private AVOption of the mpegvideo\n     * encoders\n     */\n    attribute_deprecated\n    int error_rate;\n#endif\n\n#if FF_API_CODEC_PKT\n    /**\n     * @deprecated this field is not supposed to be accessed from outside lavc\n     */\n    attribute_deprecated\n    AVPacket *pkt;\n#endif\n\n    /**\n     * VBV delay coded in the last frame (in periods of a 27 MHz clock).\n     * Used for compliant TS muxing.\n     * - encoding: Set by libavcodec.\n     * - decoding: unused.\n     */\n    uint64_t vbv_delay;\n\n    /**\n     * Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.\n     * Code outside libavcodec should access this field using:\n     * av_codec_{get,set}_pkt_timebase(avctx)\n     * - encoding: unused.\n     * - decoding: set by user.\n     */\n    AVRational pkt_timebase;\n\n    /**\n     * AVCodecDescriptor\n     * Code outside libavcodec should access this field using:\n     * av_codec_{get,set}_codec_descriptor(avctx)\n     * - encoding: unused.\n     * - decoding: set by libavcodec.\n     */\n    const AVCodecDescriptor *codec_descriptor;\n\n#if !FF_API_LOWRES\n    /**\n     * low resolution decoding, 1->1/2 size, 2->1/4 size\n     * - encoding: unused\n     * - decoding: Set by user.\n     * Code outside libavcodec should access this field using:\n     * av_codec_{get,set}_lowres(avctx)\n     */\n     int lowres;\n#endif\n\n    /**\n     * Current statistics for PTS correction.\n     * - decoding: maintained and used by libavcodec, not intended to be used by user apps\n     * - encoding: unused\n     */\n    int64_t pts_correction_num_faulty_pts; ///< Number of incorrect PTS values so far\n    int64_t pts_correction_num_faulty_dts; ///< Number of incorrect DTS values so far\n    int64_t pts_correction_last_pts;       ///< PTS of the last frame\n    int64_t pts_correction_last_dts;       ///< DTS of the last frame\n\n    /**\n     * Character encoding of the input subtitles file.\n     * - decoding: set by user\n     * - encoding: unused\n     */\n    char *sub_charenc;\n\n    /**\n     * Subtitles character encoding mode. 
Formats or codecs might adjust\n     * this setting (for instance, if they do the conversion themselves).\n     * - decoding: set by libavcodec\n     * - encoding: unused\n     */\n    int sub_charenc_mode;\n#define FF_SUB_CHARENC_MODE_DO_NOTHING  -1  ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)\n#define FF_SUB_CHARENC_MODE_AUTOMATIC    0  ///< libavcodec will select the mode itself\n#define FF_SUB_CHARENC_MODE_PRE_DECODER  1  ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv\n\n    /**\n     * Skip processing alpha if supported by codec.\n     * Note that if the format uses pre-multiplied alpha (common with VP6,\n     * and recommended due to better video quality/compression)\n     * the image will look as if alpha-blended onto a black background.\n     * However, for formats that do not use pre-multiplied alpha\n     * there might be serious artefacts (though e.g. libswscale currently\n     * assumes pre-multiplied alpha anyway).\n     * Code outside libavcodec should access this field using AVOptions\n     *\n     * - decoding: set by user\n     * - encoding: unused\n     */\n    int skip_alpha;\n\n    /**\n     * Number of samples to skip after a discontinuity\n     * - decoding: unused\n     * - encoding: set by libavcodec\n     */\n    int seek_preroll;\n\n#if !FF_API_DEBUG_MV\n    /**\n     * debug motion vectors\n     * Code outside libavcodec should access this field using AVOptions\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int debug_mv;\n#define FF_DEBUG_VIS_MV_P_FOR  0x00000001 // visualize forward predicted MVs of P frames\n#define FF_DEBUG_VIS_MV_B_FOR  0x00000002 // visualize forward predicted MVs of B frames\n#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B frames\n#endif\n\n    /**\n     * custom intra quantization matrix\n     * Code outside libavcodec should access this field using av_codec_{get,set}_chroma_intra_matrix()\n     * - encoding: Set by user, can be NULL.\n     * - decoding: unused.\n     */\n    uint16_t *chroma_intra_matrix;\n} AVCodecContext;\n\nAVRational av_codec_get_pkt_timebase         (const AVCodecContext *avctx);\nvoid       av_codec_set_pkt_timebase         (AVCodecContext *avctx, AVRational val);\n\nconst AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx);\nvoid                     av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc);\n\nint  av_codec_get_lowres(const AVCodecContext *avctx);\nvoid av_codec_set_lowres(AVCodecContext *avctx, int val);\n\nint  av_codec_get_seek_preroll(const AVCodecContext *avctx);\nvoid av_codec_set_seek_preroll(AVCodecContext *avctx, int val);\n\nuint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx);\nvoid av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val);\n\n/**\n * AVProfile.\n */\ntypedef struct AVProfile {\n    int profile;\n    const char *name; ///< short name for the profile\n} AVProfile;\n\ntypedef struct AVCodecDefault AVCodecDefault;\n\nstruct AVSubtitle;\n\n/**\n * AVCodec.\n */\ntypedef struct AVCodec {\n    /**\n     * Name of the codec implementation.\n     * The name is globally unique among encoders and among decoders (but an\n     * encoder and a decoder can share the same name).\n     * This is the primary way to find a codec from the user perspective.\n     */\n    const char *name;\n    /**\n     * Descriptive 
name for the codec, meant to be more human-readable than name.\n     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.\n     */\n    const char *long_name;\n    enum AVMediaType type;\n    enum AVCodecID id;\n    /**\n     * Codec capabilities.\n     * see CODEC_CAP_*\n     */\n    int capabilities;\n    const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}\n    const enum AVPixelFormat *pix_fmts;     ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1\n    const int *supported_samplerates;       ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0\n    const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1\n    const uint64_t *channel_layouts;         ///< array of supported channel layouts, or NULL if unknown, array is terminated by 0\n#if FF_API_LOWRES\n    uint8_t max_lowres;                     ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres()\n#endif\n    const AVClass *priv_class;              ///< AVClass for the private context\n    const AVProfile *profiles;              ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}\n\n    /*****************************************************************\n     * No fields below this line are part of the public API. They\n     * may not be used outside of libavcodec and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    int priv_data_size;\n    struct AVCodec *next;\n    /**\n     * @name Frame-level threading support functions\n     * @{\n     */\n    /**\n     * If defined, called on thread contexts when they are created.\n     * If the codec allocates writable tables in init(), re-allocate them here.\n     * priv_data will be set to a copy of the original.\n     */\n    int (*init_thread_copy)(AVCodecContext *);\n    /**\n     * Copy necessary context variables from a previous thread context to the current one.\n     * If not defined, the next thread will start automatically; otherwise, the codec\n     * must call ff_thread_finish_setup().\n     *\n     * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.\n     */\n    int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);\n    /** @} */\n\n    /**\n     * Private codec-specific defaults.\n     */\n    const AVCodecDefault *defaults;\n\n    /**\n     * Initialize codec static data, called from avcodec_register().\n     */\n    void (*init_static_data)(struct AVCodec *codec);\n\n    int (*init)(AVCodecContext *);\n    int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,\n                      const struct AVSubtitle *sub);\n    /**\n     * Encode data to an AVPacket.\n     *\n     * @param      avctx          codec context\n     * @param      avpkt          output AVPacket (may contain a user-provided buffer)\n     * @param[in]  frame          AVFrame containing the raw data to be encoded\n     * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate whether a\n     *                            non-empty packet was returned in avpkt.\n     * @return 0 on success, negative error code on failure\n     */\n    int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, 
const AVFrame *frame,\n                   int *got_packet_ptr);\n    int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);\n    int (*close)(AVCodecContext *);\n    /**\n     * Flush buffers.\n     * Will be called when seeking.\n     */\n    void (*flush)(AVCodecContext *);\n} AVCodec;\n\nint av_codec_get_max_lowres(const AVCodec *codec);\n\nstruct MpegEncContext;\n\n/**\n * AVHWAccel.\n */\ntypedef struct AVHWAccel {\n    /**\n     * Name of the hardware accelerated codec.\n     * The name is globally unique among encoders and among decoders (but an\n     * encoder and a decoder can share the same name).\n     */\n    const char *name;\n\n    /**\n     * Type of codec implemented by the hardware accelerator.\n     *\n     * See AVMEDIA_TYPE_xxx\n     */\n    enum AVMediaType type;\n\n    /**\n     * Codec implemented by the hardware accelerator.\n     *\n     * See AV_CODEC_ID_xxx\n     */\n    enum AVCodecID id;\n\n    /**\n     * Supported pixel format.\n     *\n     * Only hardware accelerated formats are supported here.\n     */\n    enum AVPixelFormat pix_fmt;\n\n    /**\n     * Hardware accelerated codec capabilities.\n     * see FF_HWACCEL_CODEC_CAP_*\n     */\n    int capabilities;\n\n    struct AVHWAccel *next;\n\n    /**\n     * Called at the beginning of each frame or field picture.\n     *\n     * Meaningful frame information (codec specific) is guaranteed to\n     * be parsed at this point. This function is mandatory.\n     *\n     * Note that buf can be NULL along with buf_size set to 0.\n     * Otherwise, this means the whole frame is available at this point.\n     *\n     * @param avctx the codec context\n     * @param buf the frame data buffer base\n     * @param buf_size the size of the frame in bytes\n     * @return zero if successful, a negative value otherwise\n     */\n    int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);\n\n    /**\n     * Callback for each slice.\n     *\n     * Meaningful slice information (codec specific) is guaranteed to\n     * be parsed at this point. This function is mandatory.\n     * The only exception is XvMC, which works on the MB level.\n     *\n     * @param avctx the codec context\n     * @param buf the slice data buffer base\n     * @param buf_size the size of the slice in bytes\n     * @return zero if successful, a negative value otherwise\n     */\n    int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);\n\n    /**\n     * Called at the end of each frame or field picture.\n     *\n     * The whole picture is parsed at this point and can now be sent\n     * to the hardware accelerator. 
This function is mandatory.\n     *\n     * @param avctx the codec context\n     * @return zero if successful, a negative value otherwise\n     */\n    int (*end_frame)(AVCodecContext *avctx);\n\n    /**\n     * Size of HW accelerator private data.\n     *\n     * Private data is allocated with av_mallocz() before\n     * AVCodecContext.get_buffer() and deallocated after\n     * AVCodecContext.release_buffer().\n     */\n    int priv_data_size;\n\n    /**\n     * Called for every Macroblock in a slice.\n     *\n     * XvMC uses it to replace ff_MPV_decode_mb().\n     * Instead of decoding to a raw picture, MB parameters are\n     * stored in an array provided by the video driver.\n     *\n     * @param s the mpeg context\n     */\n    void (*decode_mb)(struct MpegEncContext *s);\n} AVHWAccel;\n\n/**\n * @defgroup lavc_picture AVPicture\n *\n * Functions for working with AVPicture\n * @{\n */\n\n/**\n * Picture data structure.\n *\n * Up to four components can be stored in it; the last component is\n * alpha.\n */\ntypedef struct AVPicture {\n    uint8_t *data[AV_NUM_DATA_POINTERS];    ///< pointers to the image data planes\n    int linesize[AV_NUM_DATA_POINTERS];     ///< number of bytes per line\n} AVPicture;\n\n/**\n * @}\n */\n\nenum AVSubtitleType {\n    SUBTITLE_NONE,\n\n    SUBTITLE_BITMAP,                ///< A bitmap, pict will be set\n\n    /**\n     * Plain text, the text field must be set by the decoder and is\n     * authoritative. ass and pict fields may contain approximations.\n     */\n    SUBTITLE_TEXT,\n\n    /**\n     * Formatted text, the ass field must be set by the decoder and is\n     * authoritative. pict and text fields may contain approximations.\n     */\n    SUBTITLE_ASS,\n};\n\n#define AV_SUBTITLE_FLAG_FORCED 0x00000001\n\ntypedef struct AVSubtitleRect {\n    int x;         ///< top left corner  of pict, undefined when pict is not set\n    int y;         ///< top left corner  of pict, undefined when pict is not set\n    int w;         ///< width            of pict, undefined when pict is not set\n    int h;         ///< height           of pict, undefined when pict is not set\n    int nb_colors; ///< number of colors in pict, undefined when pict is not set\n\n    /**\n     * data+linesize for the bitmap of this subtitle.\n     * can be set for text/ass as well once they were rendered\n     */\n    AVPicture pict;\n    enum AVSubtitleType type;\n\n    char *text;                     ///< 0 terminated plain UTF-8 text\n\n    /**\n     * 0 terminated ASS/SSA compatible event line.\n     * The presentation of this is unaffected by the other values in this\n     * struct.\n     */\n    char *ass;\n\n    int flags;\n} AVSubtitleRect;\n\ntypedef struct AVSubtitle {\n    uint16_t format; /* 0 = graphics */\n    uint32_t start_display_time; /* relative to packet pts, in ms */\n    uint32_t end_display_time; /* relative to packet pts, in ms */\n    unsigned num_rects;\n    AVSubtitleRect **rects;\n    int64_t pts;    ///< Same as packet pts, in AV_TIME_BASE\n} AVSubtitle;\n\n/**\n * If c is NULL, returns the first registered codec,\n * if c is non-NULL, returns the next registered codec after c,\n * or NULL if c is the last one.\n */\nAVCodec *av_codec_next(const AVCodec *c);\n\n/**\n * Return the LIBAVCODEC_VERSION_INT constant.\n */\nunsigned avcodec_version(void);\n\n/**\n * Return the libavcodec build-time configuration.\n */\nconst char *avcodec_configuration(void);\n\n/**\n * Return the libavcodec license.\n */\nconst char *avcodec_license(void);\n\n/**\n * 
Register the codec codec and initialize libavcodec.\n *\n * @warning either this function or avcodec_register_all() must be called\n * before any other libavcodec functions.\n *\n * @see avcodec_register_all()\n */\nvoid avcodec_register(AVCodec *codec);\n\n/**\n * Register all the codecs, parsers and bitstream filters which were enabled at\n * configuration time. If you do not call this function you can select exactly\n * which formats you want to support, by using the individual registration\n * functions.\n *\n * @see avcodec_register\n * @see av_register_codec_parser\n * @see av_register_bitstream_filter\n */\nvoid avcodec_register_all(void);\n\n/**\n * Allocate an AVCodecContext and set its fields to default values.  The\n * resulting struct can be deallocated by calling avcodec_close() on it followed\n * by av_free().\n *\n * @param codec if non-NULL, allocate private data and initialize defaults\n *              for the given codec. It is illegal to then call avcodec_open2()\n *              with a different codec.\n *              If NULL, then the codec-specific defaults won't be initialized,\n *              which may result in suboptimal default settings (this is\n *              important mainly for encoders, e.g. libx264).\n *\n * @return An AVCodecContext filled with default values or NULL on failure.\n * @see avcodec_get_context_defaults\n */\nAVCodecContext *avcodec_alloc_context3(const AVCodec *codec);\n\n/**\n * Set the fields of the given AVCodecContext to default values corresponding\n * to the given codec (defaults may be codec-dependent).\n *\n * Do not call this function if a non-NULL codec has been passed\n * to avcodec_alloc_context3() that allocated this AVCodecContext.\n * If codec is non-NULL, it is illegal to call avcodec_open2() with a\n * different codec on this AVCodecContext.\n */\nint avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);\n\n/**\n * Get the AVClass for AVCodecContext. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *avcodec_get_class(void);\n\n/**\n * Get the AVClass for AVFrame. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *avcodec_get_frame_class(void);\n\n/**\n * Get the AVClass for AVSubtitleRect. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *avcodec_get_subtitle_rect_class(void);\n\n/**\n * Copy the settings of the source AVCodecContext into the destination\n * AVCodecContext. The resulting destination codec context will be\n * unopened, i.e. you are required to call avcodec_open2() before you\n * can use this AVCodecContext to decode/encode video/audio data.\n *\n * @param dest target codec context, should be initialized with\n *             avcodec_alloc_context3(NULL), but otherwise uninitialized\n * @param src source codec context\n * @return AVERROR() on error (e.g. 
memory allocation error), 0 on success\n */\nint avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);\n\n#if FF_API_AVFRAME_LAVC\n/**\n * @deprecated use av_frame_alloc()\n */\nattribute_deprecated\nAVFrame *avcodec_alloc_frame(void);\n\n/**\n * Set the fields of the given AVFrame to default values.\n *\n * @param frame The AVFrame of which the fields should be set to default values.\n *\n * @deprecated use av_frame_unref()\n */\nattribute_deprecated\nvoid avcodec_get_frame_defaults(AVFrame *frame);\n\n/**\n * Free the frame and any dynamically allocated objects in it,\n * e.g. extended_data.\n *\n * @param frame frame to be freed. The pointer will be set to NULL.\n *\n * @warning this function does NOT free the data buffers themselves\n * (it does not know how, since they might have been allocated with\n *  a custom get_buffer()).\n *\n * @deprecated use av_frame_free()\n */\nattribute_deprecated\nvoid avcodec_free_frame(AVFrame **frame);\n#endif\n\n/**\n * Initialize the AVCodecContext to use the given AVCodec. Prior to using this\n * function the context has to be allocated with avcodec_alloc_context3().\n *\n * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),\n * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for\n * retrieving a codec.\n *\n * @warning This function is not thread safe!\n *\n * @code\n * AVDictionary *opts = NULL;\n *\n * avcodec_register_all();\n * av_dict_set(&opts, \"b\", \"2.5M\", 0);\n * codec = avcodec_find_decoder(AV_CODEC_ID_H264);\n * if (!codec)\n *     exit(1);\n *\n * context = avcodec_alloc_context3(codec);\n *\n * if (avcodec_open2(context, codec, &opts) < 0)\n *     exit(1);\n * @endcode\n *\n * @param avctx The context to initialize.\n * @param codec The codec to open this context for. If a non-NULL codec has been\n *              previously passed to avcodec_alloc_context3() or\n *              avcodec_get_context_defaults3() for this context, then this\n *              parameter MUST be either NULL or equal to the previously passed\n *              codec.\n * @param options A dictionary filled with AVCodecContext and codec-private options.\n *                On return this object will be filled with options that were not found.\n *\n * @return zero on success, a negative value on error\n * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),\n *      av_dict_set(), av_opt_find().\n */\nint avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);\n\n/**\n * Close a given AVCodecContext and free all the data associated with it\n * (but not the AVCodecContext itself).\n *\n * Calling this function on an AVCodecContext that hasn't been opened will free\n * the codec-specific data allocated in avcodec_alloc_context3() /\n * avcodec_get_context_defaults3() with a non-NULL codec. 
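A typical\n * teardown then looks like the sketch below (a minimal sketch, assuming avctx\n * was allocated with avcodec_alloc_context3()):\n * @code\n * avcodec_close(avctx);\n * av_free(avctx);\n * @endcode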
\n * Subsequent calls will do nothing.\n */\nint avcodec_close(AVCodecContext *avctx);\n\n/**\n * Free all allocated data in the given subtitle struct.\n *\n * @param sub AVSubtitle to free.\n */\nvoid avsubtitle_free(AVSubtitle *sub);\n\n/**\n * @}\n */\n\n/**\n * @addtogroup lavc_packet\n * @{\n */\n\n#if FF_API_DESTRUCT_PACKET\n/**\n * Default packet destructor.\n * @deprecated use the AVBuffer API instead\n */\nattribute_deprecated\nvoid av_destruct_packet(AVPacket *pkt);\n#endif\n\n/**\n * Initialize optional fields of a packet with default values.\n *\n * Note that this does not touch the data and size members, which have to be\n * initialized separately.\n *\n * @param pkt packet\n */\nvoid av_init_packet(AVPacket *pkt);\n\n/**\n * Allocate the payload of a packet and initialize its fields with\n * default values.\n *\n * @param pkt packet\n * @param size wanted payload size\n * @return 0 if OK, AVERROR_xxx otherwise\n */\nint av_new_packet(AVPacket *pkt, int size);\n\n/**\n * Reduce packet size, correctly zeroing padding\n *\n * @param pkt packet\n * @param size new size\n */\nvoid av_shrink_packet(AVPacket *pkt, int size);\n\n/**\n * Increase packet size, correctly zeroing padding\n *\n * @param pkt packet\n * @param grow_by number of bytes by which to increase the size of the packet\n * @return 0 on success, a negative AVERROR on error\n */\nint av_grow_packet(AVPacket *pkt, int grow_by);\n\n/**\n * Initialize a reference-counted packet from av_malloc()ed data.\n *\n * @param pkt packet to be initialized. This function will set the data, size,\n *        buf and destruct fields, all others are left untouched.\n * @param data Data allocated by av_malloc() to be used as packet data. If this\n *        function returns successfully, the data is owned by the underlying AVBuffer.\n *        The caller may not access the data through other means.\n * @param size size of data in bytes, without the padding. I.e. the full buffer\n *        size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE.\n *\n * @return 0 on success, a negative AVERROR on error\n */\nint av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);\n\n/**\n * @warning This is a hack - the packet memory allocation stuff is broken. 
The\n * packet is allocated if it was not really allocated.\n */\nint av_dup_packet(AVPacket *pkt);\n\n/**\n * Copy packet, including contents\n *\n * @return 0 on success, negative AVERROR on fail\n */\nint av_copy_packet(AVPacket *dst, AVPacket *src);\n\n/**\n * Copy packet side data\n *\n * @return 0 on success, negative AVERROR on fail\n */\nint av_copy_packet_side_data(AVPacket *dst, AVPacket *src);\n\n/**\n * Free a packet.\n *\n * @param pkt packet to free\n */\nvoid av_free_packet(AVPacket *pkt);\n\n/**\n * Allocate new side information of a packet.\n *\n * @param pkt packet\n * @param type side information type\n * @param size side information size\n * @return pointer to freshly allocated data or NULL otherwise\n */\nuint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,\n                                 int size);\n\n/**\n * Shrink the already allocated side data buffer\n *\n * @param pkt packet\n * @param type side information type\n * @param size new side information size\n * @return 0 on success, < 0 on failure\n */\nint av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,\n                               int size);\n\n/**\n * Get side information from packet.\n *\n * @param pkt packet\n * @param type desired side information type\n * @param size pointer in which to store the side information size (optional)\n * @return pointer to data if present or NULL otherwise\n */\nuint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,\n                                 int *size);\n\nint av_packet_merge_side_data(AVPacket *pkt);\n\nint av_packet_split_side_data(AVPacket *pkt);\n\n/**\n * Pack a dictionary for use in side_data.\n *\n * @param dict The dictionary to pack.\n * @param size pointer to store the size of the returned data\n * @return pointer to data if successful, NULL otherwise\n */\nuint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size);\n/**\n * Unpack a dictionary from side_data.\n *\n * @param data data from side_data\n * @param size size of the data\n * @param dict the metadata storage dictionary\n * @return 0 on success, < 0 on failure\n */\nint av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict);\n\n\n/**\n * Convenience function to free all the side data stored.\n * All the other fields stay untouched.\n *\n * @param pkt packet\n */\nvoid av_packet_free_side_data(AVPacket *pkt);\n\n/**\n * Set up a new reference to the data described by a given packet\n *\n * If src is reference-counted, set up dst as a new reference to the\n * buffer in src. 
Otherwise allocate a new buffer in dst and copy the\n * data from src into it.\n *\n * All the other fields are copied from src.\n *\n * @see av_packet_unref\n *\n * @param dst Destination packet\n * @param src Source packet\n *\n * @return 0 on success, a negative AVERROR on error.\n */\nint av_packet_ref(AVPacket *dst, AVPacket *src);\n\n/**\n * Wipe the packet.\n *\n * Unreference the buffer referenced by the packet and reset the\n * remaining packet fields to their default values.\n *\n * @param pkt The packet to be unreferenced.\n */\nvoid av_packet_unref(AVPacket *pkt);\n\n/**\n * Move every field in src to dst and reset src.\n *\n * @see av_packet_unref\n *\n * @param src Source packet, will be reset\n * @param dst Destination packet\n */\nvoid av_packet_move_ref(AVPacket *dst, AVPacket *src);\n\n/**\n * Copy only \"properties\" fields from src to dst.\n *\n * Properties for the purpose of this function are all the fields\n * besides those related to the packet data (buf, data, size).\n *\n * @param dst Destination packet\n * @param src Source packet\n *\n * @return 0 on success, AVERROR on failure.\n *\n */\nint av_packet_copy_props(AVPacket *dst, const AVPacket *src);\n\n/**\n * @}\n */\n\n/**\n * @addtogroup lavc_decoding\n * @{\n */\n\n/**\n * Find a registered decoder with a matching codec ID.\n *\n * @param id AVCodecID of the requested decoder\n * @return A decoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_decoder(enum AVCodecID id);\n\n/**\n * Find a registered decoder with the specified name.\n *\n * @param name name of the requested decoder\n * @return A decoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_decoder_by_name(const char *name);\n\n#if FF_API_GET_BUFFER\nattribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);\nattribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);\nattribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);\n#endif\n\n/**\n * The default callback for AVCodecContext.get_buffer2(). 
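For\n * illustration, a minimal custom callback that simply defers to this default\n * might look like the following sketch (the function name is hypothetical):\n * @code\n * static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)\n * {\n *     // Delegate the actual allocation to the default implementation.\n *     return avcodec_default_get_buffer2(s, frame, flags);\n * }\n * @endcode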
\n * It is made public so it can be called by custom get_buffer2() implementations\n * for decoders without CODEC_CAP_DR1 set.\n */\nint avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);\n\n#if FF_API_EMU_EDGE\n/**\n * Return the amount of padding in pixels which the get_buffer callback must\n * provide around the edge of the image for codecs which do not have the\n * CODEC_FLAG_EMU_EDGE flag.\n *\n * @return Required padding in pixels.\n *\n * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer\n * needed\n */\nattribute_deprecated\nunsigned avcodec_get_edge_width(void);\n#endif\n\n/**\n * Modify width and height values so that they will result in a memory\n * buffer that is acceptable for the codec if you do not use any horizontal\n * padding.\n *\n * May only be used if a codec with CODEC_CAP_DR1 has been opened.\n */\nvoid avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);\n\n/**\n * Modify width and height values so that they will result in a memory\n * buffer that is acceptable for the codec if you also ensure that all\n * line sizes are a multiple of the respective linesize_align[i].\n *\n * May only be used if a codec with CODEC_CAP_DR1 has been opened.\n */\nvoid avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,\n                               int linesize_align[AV_NUM_DATA_POINTERS]);\n\n/**\n * Converts AVChromaLocation to swscale x/y chroma position.\n *\n * The positions represent the chroma (0,0) position in a coordinate system\n * with luma (0,0) representing the origin and luma(1,1) representing 256,256\n *\n * @param xpos  horizontal chroma sample position\n * @param ypos  vertical   chroma sample position\n */\nint avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);\n\n/**\n * Converts swscale x/y chroma position to AVChromaLocation.\n *\n * The positions represent the chroma (0,0) position in a coordinate system\n * with luma (0,0) representing the origin and luma(1,1) representing 256,256\n *\n * @param xpos  horizontal chroma sample position\n * @param ypos  vertical   chroma sample position\n */\nenum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);\n\n#if FF_API_OLD_DECODE_AUDIO\n/**\n * Wrapper function which calls avcodec_decode_audio4.\n *\n * @deprecated Use avcodec_decode_audio4 instead.\n *\n * Decode the audio frame of size avpkt->size from avpkt->data into samples.\n * Some decoders may support multiple frames in a single AVPacket; such\n * decoders would then just decode the first frame. In this case,\n * avcodec_decode_audio3 has to be called again with an AVPacket that contains\n * the remaining data in order to decode the second frame etc.\n * If no frame\n * could be output, frame_size_ptr is zero. Otherwise, it is the\n * decompressed frame size in bytes.\n *\n * @warning You must set frame_size_ptr to the allocated size of the\n * output buffer before calling avcodec_decode_audio3().\n *\n * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than\n * the actual read bytes because some optimized bitstream readers read 32 or 64\n * bits at once and could read over the end.\n *\n * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that\n * no overreading happens for damaged MPEG streams.\n *\n * @warning You must not provide a custom get_buffer() when using\n * avcodec_decode_audio3().  Doing so will override it with\n * avcodec_default_get_buffer.  
Use avcodec_decode_audio4() instead,\n * which does allow the application to provide a custom get_buffer().\n *\n * @note You might have to align the input buffer avpkt->data and output buffer\n * samples. The alignment requirements depend on the CPU: On some CPUs it isn't\n * necessary at all, on others it won't work at all if not aligned and on others\n * it will work but it will have an impact on performance.\n *\n * In practice, avpkt->data should have 4 byte alignment at minimum and\n * samples should be 16 byte aligned unless the CPU doesn't need it\n * (AltiVec and SSE do).\n *\n * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay\n * between input and output; these need to be fed with avpkt->data=NULL,\n * avpkt->size=0 at the end to return the remaining frames.\n *\n * @param avctx the codec context\n * @param[out] samples the output buffer, sample type in avctx->sample_fmt\n *                     If the sample format is planar, each channel plane will\n *                     be the same size, with no padding between channels.\n * @param[in,out] frame_size_ptr the output buffer size in bytes\n * @param[in] avpkt The input AVPacket containing the input buffer.\n *            You can create such a packet with av_init_packet() and by then setting\n *            data and size; some decoders might in addition need other fields.\n *            All decoders are designed to use as few fields as possible, though.\n * @return On error a negative value is returned, otherwise the number of bytes\n * used or zero if no frame data was decompressed (used) from the input AVPacket.\n */\nattribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,\n                         int *frame_size_ptr,\n                         AVPacket *avpkt);\n#endif\n\n/**\n * Decode the audio frame of size avpkt->size from avpkt->data into frame.\n *\n * Some decoders may support multiple frames in a single AVPacket. Such\n * decoders would then just decode the first frame and the return value would be\n * less than the packet size. In this case, avcodec_decode_audio4 has to be\n * called again with an AVPacket containing the remaining data in order to\n * decode the second frame, etc...  Even if no frames are returned, the packet\n * needs to be fed to the decoder with remaining data until it is completely\n * consumed or an error occurs.\n *\n * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input\n * and output. This means that for some packets they will not immediately\n * produce decoded output and need to be flushed at the end of decoding to get\n * all the decoded data. Flushing is done by calling this function with packets\n * with avpkt->data set to NULL and avpkt->size set to 0 until it stops\n * returning samples. 
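As a rough\n * sketch (assuming avctx, frame, pkt and an int got_frame are already set up;\n * error handling omitted):\n * @code\n * pkt.data = NULL;\n * pkt.size = 0;\n * do {\n *     avcodec_decode_audio4(avctx, frame, &got_frame, &pkt);\n * } while (got_frame);\n * @endcode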
\n * It is safe to flush even those decoders that are not marked with\n * CODEC_CAP_DELAY; in that case, no samples will be returned.\n *\n * @warning The input buffer, avpkt->data, must be FF_INPUT_BUFFER_PADDING_SIZE\n *          larger than the actual read bytes because some optimized bitstream\n *          readers read 32 or 64 bits at once and could read over the end.\n *\n * @param      avctx the codec context\n * @param[out] frame The AVFrame in which to store decoded audio samples.\n *                   The decoder will allocate a buffer for the decoded frame by\n *                   calling the AVCodecContext.get_buffer2() callback.\n *                   When AVCodecContext.refcounted_frames is set to 1, the frame is\n *                   reference counted and the returned reference belongs to the\n *                   caller. The caller must release the frame using av_frame_unref()\n *                   when the frame is no longer needed. The caller may safely write\n *                   to the frame if av_frame_is_writable() returns 1.\n *                   When AVCodecContext.refcounted_frames is set to 0, the returned\n *                   reference belongs to the decoder and is valid only until the\n *                   next call to this function or until closing or flushing the\n *                   decoder. The caller may not write to it.\n * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is\n *                           non-zero. Note that this field being set to zero\n *                           does not mean that an error has occurred. For\n *                           decoders with CODEC_CAP_DELAY set, no given decode\n *                           call is guaranteed to produce a frame.\n * @param[in]  avpkt The input AVPacket containing the input buffer.\n *                   At least avpkt->data and avpkt->size should be set. Some\n *                   decoders might also require additional fields to be set.\n * @return A negative error code is returned if an error occurred during\n *         decoding, otherwise the number of bytes consumed from the input\n *         AVPacket is returned.\n */\nint avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,\n                          int *got_frame_ptr, const AVPacket *avpkt);\n\n/**\n * Decode the video frame of size avpkt->size from avpkt->data into picture.\n * Some decoders may support multiple frames in a single AVPacket; such\n * decoders would then just decode the first frame.\n *\n * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than\n * the actual read bytes because some optimized bitstream readers read 32 or 64\n * bits at once and could read over the end.\n *\n * @warning The end of the input buffer buf should be set to 0 to ensure that\n * no overreading happens for damaged MPEG streams.\n *\n * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay\n * between input and output; these need to be fed with avpkt->data=NULL,\n * avpkt->size=0 at the end to return the remaining frames.\n *\n * @param avctx the codec context\n * @param[out] picture The AVFrame in which the decoded video frame will be stored.\n *             Use av_frame_alloc() to get an AVFrame. 
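For example:\n *             @code\n *             AVFrame *picture = av_frame_alloc();\n *             @endcode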
\n *             The codec will\n *             allocate memory for the actual bitmap by calling the\n *             AVCodecContext.get_buffer2() callback.\n *             When AVCodecContext.refcounted_frames is set to 1, the frame is\n *             reference counted and the returned reference belongs to the\n *             caller. The caller must release the frame using av_frame_unref()\n *             when the frame is no longer needed. The caller may safely write\n *             to the frame if av_frame_is_writable() returns 1.\n *             When AVCodecContext.refcounted_frames is set to 0, the returned\n *             reference belongs to the decoder and is valid only until the\n *             next call to this function or until closing or flushing the\n *             decoder. The caller may not write to it.\n *\n * @param[in] avpkt The input AVPacket containing the input buffer.\n *            You can create such a packet with av_init_packet() and by then setting\n *            data and size; some decoders might in addition need other fields like\n *            flags&AV_PKT_FLAG_KEY. All decoders are designed to use as few\n *            fields as possible.\n * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.\n * @return On error a negative value is returned, otherwise the number of bytes\n * used or zero if no frame could be decompressed.\n */\nint avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,\n                         int *got_picture_ptr,\n                         const AVPacket *avpkt);\n\n/**\n * Decode a subtitle message.\n * Return a negative value on error, otherwise return the number of bytes used.\n * If no subtitle could be decompressed, got_sub_ptr is zero.\n * Otherwise, the subtitle is stored in *sub.\n * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for\n * simplicity, because the performance difference is expected to be negligible\n * and reusing a get_buffer written for video codecs would probably perform badly\n * due to a potentially very different allocation pattern.\n *\n * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input\n * and output. This means that for some packets they will not immediately\n * produce decoded output and need to be flushed at the end of decoding to get\n * all the decoded data. Flushing is done by calling this function with packets\n * with avpkt->data set to NULL and avpkt->size set to 0 until it stops\n * returning subtitles. 
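As a rough sketch,\n * mirroring the audio case (assuming avctx, an AVSubtitle sub, an int got_sub\n * and pkt are already set up; error handling omitted):\n * @code\n * pkt.data = NULL;\n * pkt.size = 0;\n * do {\n *     avcodec_decode_subtitle2(avctx, &sub, &got_sub, &pkt);\n *     if (got_sub)\n *         avsubtitle_free(&sub);\n * } while (got_sub);\n * @endcode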
\n * It is safe to flush even those decoders that are not marked with\n * CODEC_CAP_DELAY; in that case, no subtitles will be returned.\n *\n * @param avctx the codec context\n * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored; it must be\n *                 freed with avsubtitle_free() if *got_sub_ptr is set.\n * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.\n * @param[in] avpkt The input AVPacket containing the input buffer.\n */\nint avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,\n                            int *got_sub_ptr,\n                            AVPacket *avpkt);\n\n/**\n * @defgroup lavc_parsing Frame parsing\n * @{\n */\n\nenum AVPictureStructure {\n    AV_PICTURE_STRUCTURE_UNKNOWN,      ///< unknown\n    AV_PICTURE_STRUCTURE_TOP_FIELD,    ///< coded as top field\n    AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field\n    AV_PICTURE_STRUCTURE_FRAME,        ///< coded as frame\n};\n\ntypedef struct AVCodecParserContext {\n    void *priv_data;\n    struct AVCodecParser *parser;\n    int64_t frame_offset; /* offset of the current frame */\n    int64_t cur_offset; /* current offset\n                           (incremented by each av_parser_parse()) */\n    int64_t next_frame_offset; /* offset of the next frame */\n    /* video info */\n    int pict_type; /* XXX: Put it back in AVCodecContext. */\n    /**\n     * This field is used for proper frame duration computation in lavf.\n     * It signals how much longer the frame duration of the current frame\n     * is compared to normal frame duration.\n     *\n     * frame_duration = (1 + repeat_pict) * time_base\n     *\n     * It is used by codecs like H.264 to display telecined material.\n     */\n    int repeat_pict; /* XXX: Put it back in AVCodecContext. */\n    int64_t pts;     /* pts of the current frame */\n    int64_t dts;     /* dts of the current frame */\n\n    /* private data */\n    int64_t last_pts;\n    int64_t last_dts;\n    int fetch_timestamp;\n\n#define AV_PARSER_PTS_NB 4\n    int cur_frame_start_index;\n    int64_t cur_frame_offset[AV_PARSER_PTS_NB];\n    int64_t cur_frame_pts[AV_PARSER_PTS_NB];\n    int64_t cur_frame_dts[AV_PARSER_PTS_NB];\n\n    int flags;\n#define PARSER_FLAG_COMPLETE_FRAMES           0x0001\n#define PARSER_FLAG_ONCE                      0x0002\n/// Set if the parser has a valid file offset\n#define PARSER_FLAG_FETCHED_OFFSET            0x0004\n#define PARSER_FLAG_USE_CODEC_TS              0x1000\n\n    int64_t offset;      ///< byte offset from the starting packet's start\n    int64_t cur_frame_end[AV_PARSER_PTS_NB];\n\n    /**\n     * Set by parser to 1 for key frames and 0 for non-key frames.\n     * It is initialized to -1, so if the parser doesn't set this flag,\n     * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames\n     * will be used.\n     */\n    int key_frame;\n\n    /**\n     * Time difference in stream time base units from the pts of this\n     * packet to the point at which the output from the decoder has converged\n     * independently of the availability of previous frames. 
That is, the\n     * frames are virtually identical whether decoding started from\n     * the very first frame or from this keyframe.\n     * Is AV_NOPTS_VALUE if unknown.\n     * This field is not the display duration of the current frame.\n     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY\n     * set.\n     *\n     * The purpose of this field is to allow seeking in streams that have no\n     * keyframes in the conventional sense. It corresponds to the\n     * recovery point SEI in H.264 and match_time_delta in NUT. It is also\n     * essential for some types of subtitle streams to ensure that all\n     * subtitles are correctly displayed after seeking.\n     */\n    int64_t convergence_duration;\n\n    // Timestamp generation support:\n    /**\n     * Synchronization point for start of timestamp generation.\n     *\n     * Set to >0 for sync point, 0 for no sync point and <0 for undefined\n     * (default).\n     *\n     * For example, this corresponds to presence of H.264 buffering period\n     * SEI message.\n     */\n    int dts_sync_point;\n\n    /**\n     * Offset of the current timestamp against last timestamp sync point in\n     * units of AVCodecContext.time_base.\n     *\n     * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must\n     * contain a valid timestamp offset.\n     *\n     * Note that the timestamp of a sync point usually has a nonzero\n     * dts_ref_dts_delta, which refers to the previous sync point. The offset of\n     * the next frame after a timestamp sync point will usually be 1.\n     *\n     * For example, this corresponds to H.264 cpb_removal_delay.\n     */\n    int dts_ref_dts_delta;\n\n    /**\n     * Presentation delay of current frame in units of AVCodecContext.time_base.\n     *\n     * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must\n     * contain a valid non-negative timestamp delta (presentation time of a frame\n     * must not lie in the past).\n     *\n     * This delay represents the difference between decoding and presentation\n     * time of the frame.\n     *\n     * For example, this corresponds to H.264 dpb_output_delay.\n     */\n    int pts_dts_delta;\n\n    /**\n     * Position of the packet in file.\n     *\n     * Analogous to cur_frame_pts/dts\n     */\n    int64_t cur_frame_pos[AV_PARSER_PTS_NB];\n\n    /**\n     * Byte position of currently parsed frame in stream.\n     */\n    int64_t pos;\n\n    /**\n     * Previous frame byte position.\n     */\n    int64_t last_pos;\n\n    /**\n     * Duration of the current frame.\n     * For audio, this is in units of 1 / AVCodecContext.sample_rate.\n     * For all other types, this is in units of AVCodecContext.time_base.\n     */\n    int duration;\n\n    enum AVFieldOrder field_order;\n\n    /**\n     * Indicate whether a picture is coded as a frame, top field or bottom field.\n     *\n     * For example, H.264 field_pic_flag equal to 0 corresponds to\n     * AV_PICTURE_STRUCTURE_FRAME. 
An H.264 picture with field_pic_flag\n     * equal to 1 and bottom_field_flag equal to 0 corresponds to\n     * AV_PICTURE_STRUCTURE_TOP_FIELD.\n     */\n    enum AVPictureStructure picture_structure;\n\n    /**\n     * Picture number incremented in presentation or output order.\n     * This field may be reinitialized at the first picture of a new sequence.\n     *\n     * For example, this corresponds to H.264 PicOrderCnt.\n     */\n    int output_picture_number;\n} AVCodecParserContext;\n\ntypedef struct AVCodecParser {\n    int codec_ids[5]; /* several codec IDs are permitted */\n    int priv_data_size;\n    int (*parser_init)(AVCodecParserContext *s);\n    int (*parser_parse)(AVCodecParserContext *s,\n                        AVCodecContext *avctx,\n                        const uint8_t **poutbuf, int *poutbuf_size,\n                        const uint8_t *buf, int buf_size);\n    void (*parser_close)(AVCodecParserContext *s);\n    int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);\n    struct AVCodecParser *next;\n} AVCodecParser;\n\nAVCodecParser *av_parser_next(AVCodecParser *c);\n\nvoid av_register_codec_parser(AVCodecParser *parser);\nAVCodecParserContext *av_parser_init(int codec_id);\n\n/**\n * Parse a packet.\n *\n * @param s             parser context.\n * @param avctx         codec context.\n * @param poutbuf       set to pointer to parsed buffer or NULL if not yet finished.\n * @param poutbuf_size  set to size of parsed buffer or zero if not yet finished.\n * @param buf           input buffer.\n * @param buf_size      input length; to signal EOF, this should be 0 (so that the last frame can be output).\n * @param pts           input presentation timestamp.\n * @param dts           input decoding timestamp.\n * @param pos           input byte position in stream.\n * @return the number of bytes of the input bitstream used.\n *\n * Example:\n * @code\n *   while(in_len){\n *       len = av_parser_parse2(myparser, avctx, &data, &size,\n *                                        in_data, in_len,\n *                                        pts, dts, pos);\n *       in_data += len;\n *       in_len  -= len;\n *\n *       if(size)\n *          decode_frame(data, size);\n *   }\n * @endcode\n */\nint av_parser_parse2(AVCodecParserContext *s,\n                     AVCodecContext *avctx,\n                     uint8_t **poutbuf, int *poutbuf_size,\n                     const uint8_t *buf, int buf_size,\n                     int64_t pts, int64_t dts,\n                     int64_t pos);\n\n/**\n * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed\n * @deprecated use AVBitStreamFilter\n */\nint av_parser_change(AVCodecParserContext *s,\n                     AVCodecContext *avctx,\n                     uint8_t **poutbuf, int *poutbuf_size,\n                     const uint8_t *buf, int buf_size, int keyframe);\nvoid av_parser_close(AVCodecParserContext *s);\n\n/**\n * @}\n * @}\n */\n\n/**\n * @addtogroup lavc_encoding\n * @{\n */\n\n/**\n * Find a registered encoder with a matching codec ID.\n *\n * @param id AVCodecID of the requested encoder\n * @return An encoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_encoder(enum AVCodecID id);\n\n/**\n * Find a registered encoder with the specified name.\n *\n * @param name name of the requested encoder\n * @return An encoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_encoder_by_name(const char *name);\n\n#if FF_API_OLD_ENCODE_AUDIO\n/**\n * 
Encode an audio frame from samples into buf.\n *\n * @deprecated Use avcodec_encode_audio2 instead.\n *\n * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.\n * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user\n * will know how much space is needed because it depends on the value passed\n * in buf_size as described below. In that case a lower value can be used.\n *\n * @param avctx the codec context\n * @param[out] buf the output buffer\n * @param[in] buf_size the output buffer size\n * @param[in] samples the input buffer containing the samples\n * The number of samples read from this buffer is frame_size*channels,\n * both of which are defined in avctx.\n * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of\n * samples read from samples is equal to:\n * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))\n * This also implies that av_get_bits_per_sample() must not return 0 for these\n * codecs.\n * @return On error a negative value is returned, on success zero or the number\n * of bytes used to encode the data read from the input buffer.\n */\nint attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,\n                                              uint8_t *buf, int buf_size,\n                                              const short *samples);\n#endif\n\n/**\n * Encode a frame of audio.\n *\n * Takes input samples from frame and writes the next output packet, if\n * available, to avpkt. The output packet does not necessarily contain data for\n * the most recent frame, as encoders can delay, split, and combine input frames\n * internally as needed.\n *\n * @param avctx     codec context\n * @param avpkt     output AVPacket.\n *                  The user can supply an output buffer by setting\n *                  avpkt->data and avpkt->size prior to calling the\n *                  function, but if the size of the user-provided data is not\n *                  large enough, encoding will fail. If avpkt->data and\n *                  avpkt->size are set, avpkt->destruct must also be set. All\n *                  other AVPacket fields will be reset by the encoder using\n *                  av_init_packet(). If avpkt->data is NULL, the encoder will\n *                  allocate it. The encoder will set avpkt->size to the size\n *                  of the output packet.\n *\n *                  If this function fails or produces no output, avpkt will be\n *                  freed using av_free_packet() (i.e. avpkt->destruct will be\n *                  called to free the user supplied buffer).\n * @param[in] frame AVFrame containing the raw audio data to be encoded.\n *                  May be NULL when flushing an encoder that has the\n *                  CODEC_CAP_DELAY capability set.\n *                  If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame\n *                  can have any number of samples.\n *                  If it is not set, frame->nb_samples must be equal to\n *                  avctx->frame_size for all frames except the last.\n *                  The final frame may be smaller than avctx->frame_size.\n * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the\n *                            output packet is non-empty, and to 0 if it is\n *                            empty. 
If the function returns an error, the\n *                            packet can be assumed to be invalid, and the\n *                            value of got_packet_ptr is undefined and should\n *                            not be used.\n * @return          0 on success, negative error code on failure\n */\nint avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,\n                          const AVFrame *frame, int *got_packet_ptr);\n\n#if FF_API_OLD_ENCODE_VIDEO\n/**\n * @deprecated use avcodec_encode_video2() instead.\n *\n * Encode a video frame from pict into buf.\n * The input picture should be\n * stored using a specific format, namely avctx.pix_fmt.\n *\n * @param avctx the codec context\n * @param[out] buf the output buffer for the bitstream of encoded frame\n * @param[in] buf_size the size of the output buffer in bytes\n * @param[in] pict the input picture to encode\n * @return On error a negative value is returned, on success zero or the number\n * of bytes used from the output buffer.\n */\nattribute_deprecated\nint avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,\n                         const AVFrame *pict);\n#endif\n\n/**\n * Encode a frame of video.\n *\n * Takes input raw video data from frame and writes the next output packet, if\n * available, to avpkt. The output packet does not necessarily contain data for\n * the most recent frame, as encoders can delay and reorder input frames\n * internally as needed.\n *\n * @param avctx     codec context\n * @param avpkt     output AVPacket.\n *                  The user can supply an output buffer by setting\n *                  avpkt->data and avpkt->size prior to calling the\n *                  function, but if the size of the user-provided data is not\n *                  large enough, encoding will fail. All other AVPacket fields\n *                  will be reset by the encoder using av_init_packet(). If\n *                  avpkt->data is NULL, the encoder will allocate it.\n *                  The encoder will set avpkt->size to the size of the\n *                  output packet. The returned data (if any) belongs to the\n *                  caller, who is responsible for freeing it.\n *\n *                  If this function fails or produces no output, avpkt will be\n *                  freed using av_free_packet() (i.e. avpkt->destruct will be\n *                  called to free the user supplied buffer).\n * @param[in] frame AVFrame containing the raw video data to be encoded.\n *                  May be NULL when flushing an encoder that has the\n *                  CODEC_CAP_DELAY capability set.\n * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the\n *                            output packet is non-empty, and to 0 if it is\n *                            empty. 
If the function returns an error, the\n *                            packet can be assumed to be invalid, and the\n *                            value of got_packet_ptr is undefined and should\n *                            not be used.\n * @return          0 on success, negative error code on failure\n */\nint avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,\n                          const AVFrame *frame, int *got_packet_ptr);\n\nint avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,\n                            const AVSubtitle *sub);\n\n\n/**\n * @}\n */\n\n#if FF_API_AVCODEC_RESAMPLE\n/**\n * @defgroup lavc_resample Audio resampling\n * @ingroup libavc\n * @deprecated use libswresample instead\n *\n * @{\n */\nstruct ReSampleContext;\nstruct AVResampleContext;\n\ntypedef struct ReSampleContext ReSampleContext;\n\n/**\n * Initialize audio resampling context.\n *\n * @param output_channels  number of output channels\n * @param input_channels   number of input channels\n * @param output_rate      output sample rate\n * @param input_rate       input sample rate\n * @param sample_fmt_out   requested output sample format\n * @param sample_fmt_in    input sample format\n * @param filter_length    length of each FIR filter in the filterbank relative to the cutoff frequency\n * @param log2_phase_count log2 of the number of entries in the polyphase filterbank\n * @param linear           if 1, the FIR filter used will be linearly interpolated\n                           between the 2 closest; if 0, the closest will be used\n * @param cutoff           cutoff frequency, 1.0 corresponds to half the output sampling rate\n * @return allocated ReSampleContext, NULL if an error occurred\n */\nattribute_deprecated\nReSampleContext *av_audio_resample_init(int output_channels, int input_channels,\n                                        int output_rate, int input_rate,\n                                        enum AVSampleFormat sample_fmt_out,\n                                        enum AVSampleFormat sample_fmt_in,\n                                        int filter_length, int log2_phase_count,\n                                        int linear, double cutoff);\n\nattribute_deprecated\nint audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);\n\n/**\n * Free resample context.\n *\n * @param s a non-NULL pointer to a resample context previously\n *          created with av_audio_resample_init()\n */\nattribute_deprecated\nvoid audio_resample_close(ReSampleContext *s);\n\n\n/**\n * Initialize an audio resampler.\n * Note: if either rate is not an integer, simply scale both rates up so that they are.\n * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq\n * @param log2_phase_count log2 of the number of entries in the polyphase filterbank\n * @param linear If 1, the FIR filter used will be linearly interpolated\n                 between the 2 closest; if 0, the closest will be used\n * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate\n */\nattribute_deprecated\nstruct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);\n\n/**\n * Resample an array of samples using a previously configured context.\n * @param src an array of unconsumed samples\n * @param consumed the number of samples of src which have been consumed is returned here\n * @param src_size the number of unconsumed samples 
available\n * @param dst_size the amount of space in samples available in dst\n * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.\n * @return the number of samples written in dst or -1 if an error occurred\n */\nattribute_deprecated\nint av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);\n\n\n/**\n * Compensate samplerate/timestamp drift. The compensation is done by changing\n * the resampler parameters, so no audible clicks or similar distortions occur.\n * @param compensation_distance distance in output samples over which the compensation should be performed\n * @param sample_delta number of output samples which should be output less\n *\n * Example: av_resample_compensate(c, 10, 500)\n * Here, instead of 510 samples, only 500 samples would be output.\n *\n * Note: due to rounding the actual compensation might be slightly different,\n * especially if the compensation_distance is large and the in_rate used during init is small\n */\nattribute_deprecated\nvoid av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);\nattribute_deprecated\nvoid av_resample_close(struct AVResampleContext *c);\n\n/**\n * @}\n */\n#endif\n\n/**\n * @addtogroup lavc_picture\n * @{\n */\n\n/**\n * Allocate memory for the pixels of a picture and set up the AVPicture\n * fields for it.\n *\n * Call avpicture_free() to free it.\n *\n * @param picture            the picture structure to be filled in\n * @param pix_fmt            the pixel format of the picture\n * @param width              the width of the picture\n * @param height             the height of the picture\n * @return zero if successful, a negative error code otherwise\n *\n * @see av_image_alloc(), avpicture_fill()\n */\nint avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Free a picture previously allocated by avpicture_alloc().\n * The data buffer used by the AVPicture is freed, but the AVPicture structure\n * itself is not.\n *\n * @param picture the AVPicture to be freed\n */\nvoid avpicture_free(AVPicture *picture);\n\n/**\n * Set up the picture fields based on the specified image parameters\n * and the provided image data buffer.\n *\n * The picture fields are filled in by using the image data buffer\n * pointed to by ptr.\n *\n * If ptr is NULL, the function will fill only the picture linesize\n * array and return the required size for the image buffer.\n *\n * To allocate an image buffer and fill the picture data in one call,\n * use avpicture_alloc().\n *\n * @param picture       the picture to be filled in\n * @param ptr           buffer where the image data is stored, or NULL\n * @param pix_fmt       the pixel format of the image\n * @param width         the width of the image in pixels\n * @param height        the height of the image in pixels\n * @return the size in bytes required for the image buffer, a negative error code\n * in case of failure\n *\n * @see av_image_fill_arrays()\n */\nint avpicture_fill(AVPicture *picture, const uint8_t *ptr,\n                   enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Copy pixel data from an AVPicture into a buffer.\n *\n * avpicture_get_size() can be used to compute the required size for\n * the buffer to fill.\n *\n * @param src        source picture with filled data\n * @param pix_fmt    picture pixel format\n * @param width      picture width\n * 
@param height     picture height\n * @param dest       destination buffer\n * @param dest_size  destination buffer size in bytes\n * @return the number of bytes written to dest, or a negative value\n * (error code) on error, for example if the destination buffer is not\n * big enough\n *\n * @see av_image_copy_to_buffer()\n */\nint avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt,\n                     int width, int height,\n                     unsigned char *dest, int dest_size);\n\n/**\n * Calculate the size in bytes that a picture of the given width and height\n * would occupy if stored in the given picture format.\n *\n * @param pix_fmt    picture pixel format\n * @param width      picture width\n * @param height     picture height\n * @return the computed picture buffer size or a negative error code\n * in case of error\n *\n * @see av_image_get_buffer_size().\n */\nint avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);\n\n#if FF_API_DEINTERLACE\n/**\n *  deinterlace - if not supported return -1\n *\n * @deprecated - use yadif (in libavfilter) instead\n */\nattribute_deprecated\nint avpicture_deinterlace(AVPicture *dst, const AVPicture *src,\n                          enum AVPixelFormat pix_fmt, int width, int height);\n#endif\n/**\n * Copy image src to dst. Wraps av_image_copy().\n */\nvoid av_picture_copy(AVPicture *dst, const AVPicture *src,\n                     enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Crop image top and left side.\n */\nint av_picture_crop(AVPicture *dst, const AVPicture *src,\n                    enum AVPixelFormat pix_fmt, int top_band, int left_band);\n\n/**\n * Pad image.\n */\nint av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,\n            int padtop, int padbottom, int padleft, int padright, int *color);\n\n/**\n * @}\n */\n\n/**\n * @defgroup lavc_misc Utility functions\n * @ingroup libavc\n *\n * Miscellaneous utility functions related to both encoding and decoding\n * (or neither).\n * @{\n */\n\n/**\n * @defgroup lavc_misc_pixfmt Pixel formats\n *\n * Functions for working with pixel formats.\n * @{\n */\n\n/**\n * Utility function to access log2_chroma_w and log2_chroma_h from\n * the pixel format AVPixFmtDescriptor.\n *\n * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample\n * for one that returns a failure code and continues in case of invalid\n * pix_fmts.\n *\n * @param[in]  pix_fmt the pixel format\n * @param[out] h_shift store log2_chroma_w\n * @param[out] v_shift store log2_chroma_h\n *\n * @see av_pix_fmt_get_chroma_sub_sample\n */\n\nvoid avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);\n\n/**\n * Return a value representing the fourCC code associated with the\n * pixel format pix_fmt, or 0 if no associated fourCC code can be\n * found.\n */\nunsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);\n\n#define FF_LOSS_RESOLUTION  0x0001 /**< loss due to resolution change */\n#define FF_LOSS_DEPTH       0x0002 /**< loss due to color depth change */\n#define FF_LOSS_COLORSPACE  0x0004 /**< loss due to color space conversion */\n#define FF_LOSS_ALPHA       0x0008 /**< loss of alpha bits */\n#define FF_LOSS_COLORQUANT  0x0010 /**< loss due to color quantization */\n#define FF_LOSS_CHROMA      0x0020 /**< loss of chroma (e.g. 
RGB to gray conversion) */\n\n/**\n * Compute what kind of losses will occur when converting from one specific\n * pixel format to another.\n * When converting from one pixel format to another, information loss may occur.\n * For example, when converting from RGB24 to GRAY, the color information will\n * be lost. Similarly, other losses occur when converting from some formats to\n * other formats. These losses can involve loss of chroma, but also loss of\n * resolution, loss of color depth, loss due to the color space conversion, loss\n * of the alpha bits or loss due to color quantization.\n * avcodec_get_pix_fmt_loss() informs you about the various types of losses\n * which will occur when converting from one pixel format to another.\n *\n * @param[in] dst_pix_fmt destination pixel format\n * @param[in] src_pix_fmt source pixel format\n * @param[in] has_alpha Whether the source pixel format alpha channel is used.\n * @return Combination of flags informing you what kind of losses will occur\n * (maximum loss for an invalid dst_pix_fmt).\n */\nint avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,\n                             int has_alpha);\n\n/**\n * Find the best pixel format to convert to given a certain source pixel\n * format.  When converting from one pixel format to another, information loss\n * may occur.  For example, when converting from RGB24 to GRAY, the color\n * information will be lost. Similarly, other losses occur when converting from\n * some formats to other formats. avcodec_find_best_pix_fmt_of_list() selects which of\n * the given pixel formats should be used to suffer the least amount of loss.\n * The pixel formats from which it chooses one are determined by the\n * pix_fmt_list parameter.\n *\n *\n * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from\n * @param[in] src_pix_fmt source pixel format\n * @param[in] has_alpha Whether the source pixel format alpha channel is used.\n * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.\n * @return The best pixel format to convert to or -1 if none was found.\n */\nenum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,\n                                            enum AVPixelFormat src_pix_fmt,\n                                            int has_alpha, int *loss_ptr);\n\n/**\n * Find the best pixel format to convert to given a certain source pixel\n * format and a selection of two destination pixel formats. When converting from\n * one pixel format to another, information loss may occur.  For example, when converting\n * from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when\n * converting from some formats to other formats. 
avcodec_find_best_pix_fmt_of_2() selects which of\n * the given pixel formats should be used to suffer the least amount of loss.\n *\n * If one of the destination formats is AV_PIX_FMT_NONE the other pixel format (if valid) will be\n * returned.\n *\n * @code\n * src_pix_fmt = AV_PIX_FMT_YUV420P;\n * dst_pix_fmt1= AV_PIX_FMT_RGB24;\n * dst_pix_fmt2= AV_PIX_FMT_GRAY8;\n * dst_pix_fmt3= AV_PIX_FMT_RGB8;\n * loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored.\n * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss);\n * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss);\n * @endcode\n *\n * @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from\n * @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from\n * @param[in] src_pix_fmt Source pixel format\n * @param[in] has_alpha Whether the source pixel format alpha channel is used.\n * @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e.\n *                               NULL or value of zero means we care about all losses. Out: the loss\n *                               that occurs when converting from src to selected dst pixel format.\n * @return The best pixel format to convert to or -1 if none was found.\n */\nenum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,\n                                            enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);\n\nattribute_deprecated\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI\nenum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list,\n                                              enum AVPixelFormat src_pix_fmt,\n                                              int has_alpha, int *loss_ptr);\n#else\nenum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,\n                                            enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);\n#endif\n\n\nenum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);\n\n/**\n * @}\n */\n\n#if FF_API_SET_DIMENSIONS\n/**\n * @deprecated this function is not supposed to be used from outside of lavc\n */\nattribute_deprecated\nvoid avcodec_set_dimensions(AVCodecContext *s, int width, int height);\n#endif\n\n/**\n * Put a string representing the codec tag codec_tag in buf.\n *\n * @param buf       buffer to place codec tag in\n * @param buf_size size in bytes of buf\n * @param codec_tag codec tag to assign\n * @return the length of the string that would have been generated if\n * enough space had been available, excluding the trailing null\n */\nsize_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);\n\nvoid avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);\n\n/**\n * Return a name for the specified profile, if available.\n *\n * @param codec the codec that is searched for the given profile\n * @param profile the profile value for which a name is requested\n * @return A name for the profile if found, NULL otherwise.\n */\nconst char *av_get_profile_name(const AVCodec *codec, int profile);\n\nint avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);\nint avcodec_default_execute2(AVCodecContext 
*c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);\n//FIXME func typedef\n\n/**\n * Fill AVFrame audio data and linesize pointers.\n *\n * The buffer buf must be a preallocated buffer with a size big enough\n * to contain the specified number of samples. The filled AVFrame data\n * pointers will point to this buffer.\n *\n * AVFrame extended_data channel pointers are allocated if necessary for\n * planar audio.\n *\n * @param frame       the AVFrame\n *                    frame->nb_samples must be set prior to calling the\n *                    function. This function fills in frame->data,\n *                    frame->extended_data, frame->linesize[0].\n * @param nb_channels channel count\n * @param sample_fmt  sample format\n * @param buf         buffer to use for frame data\n * @param buf_size    size of buffer\n * @param align       plane size sample alignment (0 = default)\n * @return            >=0 on success, negative error code on failure\n * @todo return the size in bytes required to store the samples in\n * case of success, at the next libavutil bump\n */\nint avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,\n                             enum AVSampleFormat sample_fmt, const uint8_t *buf,\n                             int buf_size, int align);\n\n/**\n * Reset the internal decoder state / flush internal buffers. Should be called\n * e.g. when seeking or when switching to a different stream.\n *\n * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0),\n * this invalidates the frames previously returned from the decoder. When\n * refcounted frames are used, the decoder just releases any references it might\n * keep internally, but the caller's reference remains valid.\n */\nvoid avcodec_flush_buffers(AVCodecContext *avctx);\n\n/**\n * Return codec bits per sample.\n *\n * @param[in] codec_id the codec\n * @return Number of bits per sample or zero if unknown for the given codec.\n */\nint av_get_bits_per_sample(enum AVCodecID codec_id);\n\n/**\n * Return the PCM codec associated with a sample format.\n * @param be  endianness, 0 for little, 1 for big,\n *            -1 (or anything else) for native\n * @return  AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE\n */\nenum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);\n\n/**\n * Return codec bits per sample.\n * Only return non-zero if the bits per sample is exactly correct, not an\n * approximation.\n *\n * @param[in] codec_id the codec\n * @return Number of bits per sample or zero if unknown for the given codec.\n */\nint av_get_exact_bits_per_sample(enum AVCodecID codec_id);\n\n/**\n * Return audio frame duration.\n *\n * @param avctx        codec context\n * @param frame_bytes  size of the frame, or 0 if unknown\n * @return             frame duration, in samples, if known. 
0 if not able to\n *                     determine.\n */\nint av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);\n\n\ntypedef struct AVBitStreamFilterContext {\n    void *priv_data;\n    struct AVBitStreamFilter *filter;\n    AVCodecParserContext *parser;\n    struct AVBitStreamFilterContext *next;\n} AVBitStreamFilterContext;\n\n\ntypedef struct AVBitStreamFilter {\n    const char *name;\n    int priv_data_size;\n    int (*filter)(AVBitStreamFilterContext *bsfc,\n                  AVCodecContext *avctx, const char *args,\n                  uint8_t **poutbuf, int *poutbuf_size,\n                  const uint8_t *buf, int buf_size, int keyframe);\n    void (*close)(AVBitStreamFilterContext *bsfc);\n    struct AVBitStreamFilter *next;\n} AVBitStreamFilter;\n\n/**\n * Register a bitstream filter.\n *\n * The filter will be accessible to the application code through\n * av_bitstream_filter_next() or can be directly initialized with\n * av_bitstream_filter_init().\n *\n * @see avcodec_register_all()\n */\nvoid av_register_bitstream_filter(AVBitStreamFilter *bsf);\n\n/**\n * Create and initialize a bitstream filter context given a bitstream\n * filter name.\n *\n * The returned context must be freed with av_bitstream_filter_close().\n *\n * @param name    the name of the bitstream filter\n * @return a bitstream filter context if a matching filter was found\n * and successfully initialized, NULL otherwise\n */\nAVBitStreamFilterContext *av_bitstream_filter_init(const char *name);\n\n/**\n * Filter bitstream.\n *\n * This function filters the buffer buf with size buf_size, and places the\n * filtered buffer in the buffer pointed to by poutbuf.\n *\n * The output buffer must be freed by the caller.\n *\n * @param bsfc            bitstream filter context created by av_bitstream_filter_init()\n * @param avctx           AVCodecContext accessed by the filter, may be NULL.\n *                        If specified, this must point to the encoder context of the\n *                        output stream the packet is sent to.\n * @param args            arguments which specify the filter configuration, may be NULL\n * @param poutbuf         pointer which is updated to point to the filtered buffer\n * @param poutbuf_size    pointer which is updated to the filtered buffer size in bytes\n * @param buf             buffer containing the data to filter\n * @param buf_size        size in bytes of buf\n * @param keyframe        set to non-zero if the buffer to filter corresponds to key-frame packet data\n * @return >= 0 in case of success, or a negative error code in case of failure\n *\n * If the return value is positive, an output buffer is allocated and\n * is available in *poutbuf, and is distinct from the input buffer.\n *\n * If the return value is 0, the output buffer is not allocated and\n * should be considered identical to the input buffer, or, in case\n * *poutbuf was set, it points to the input buffer (not necessarily to\n * its starting address).\n */\nint av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,\n                               AVCodecContext *avctx, const char *args,\n                               uint8_t **poutbuf, int *poutbuf_size,\n                               const uint8_t *buf, int buf_size, int keyframe);\n\n/**\n * Release bitstream filter context.\n *\n * @param bsf the bitstream filter context created with\n * av_bitstream_filter_init(), can be NULL\n */\nvoid av_bitstream_filter_close(AVBitStreamFilterContext *bsf);\n\n/**\n * If f is NULL, return the 
first registered bitstream filter,\n * if f is non-NULL, return the next registered bitstream filter\n * after f, or NULL if f is the last one.\n *\n * This function can be used to iterate over all registered bitstream\n * filters.\n */\nAVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f);\n\n/* memory */\n\n/**\n * Same behaviour as av_fast_malloc(), but the buffer has an additional\n * FF_INPUT_BUFFER_PADDING_SIZE bytes at the end which will always be 0.\n *\n * In addition, the whole buffer will initially and after resizes\n * be 0-initialized so that no uninitialized data will ever appear.\n */\nvoid av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * Same behaviour as av_fast_padded_malloc(), except that the buffer will always\n * be 0-initialized after the call.\n */\nvoid av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * Encode extradata length to a buffer. Used by xiph codecs.\n *\n * @param s buffer to write to; must be at least (v/255+1) bytes long\n * @param v size of extradata in bytes\n * @return number of bytes written to the buffer.\n */\nunsigned int av_xiphlacing(unsigned char *s, unsigned int v);\n\n#if FF_API_MISSING_SAMPLE\n/**\n * Log a generic warning message about a missing feature. This function is\n * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)\n * only, and would normally not be used by applications.\n * @param[in] avc a pointer to an arbitrary struct of which the first field is\n * a pointer to an AVClass struct\n * @param[in] feature string containing the name of the missing feature\n * @param[in] want_sample indicates whether samples which exhibit this feature are wanted.\n * If want_sample is non-zero, additional verbiage will be added to the log\n * message which tells the user how to report samples to the development\n * mailing list.\n * @deprecated Use avpriv_report_missing_feature() instead.\n */\nattribute_deprecated\nvoid av_log_missing_feature(void *avc, const char *feature, int want_sample);\n\n/**\n * Log a generic warning message asking for a sample. This function is\n * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)\n * only, and would normally not be used by applications.\n * @param[in] avc a pointer to an arbitrary struct of which the first field is\n * a pointer to an AVClass struct\n * @param[in] msg string containing an optional message, or NULL if no message\n * @deprecated Use avpriv_request_sample() instead.\n */\nattribute_deprecated\nvoid av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);\n#endif /* FF_API_MISSING_SAMPLE */\n\n/**\n * Register the hardware accelerator hwaccel.\n */\nvoid av_register_hwaccel(AVHWAccel *hwaccel);\n\n/**\n * If hwaccel is NULL, returns the first registered hardware accelerator,\n * if hwaccel is non-NULL, returns the next registered hardware accelerator\n * after hwaccel, or NULL if hwaccel is the last one.\n */\nAVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel);\n\n\n/**\n * Lock operation used by lockmgr\n */\nenum AVLockOp {\n  AV_LOCK_CREATE,  ///< Create a mutex\n  AV_LOCK_OBTAIN,  ///< Lock the mutex\n  AV_LOCK_RELEASE, ///< Unlock the mutex\n  AV_LOCK_DESTROY, ///< Free mutex resources\n};\n\n/**\n * Register a user-provided lock manager supporting the operations\n * specified by AVLockOp. mutex points to a (void *) where the\n * lockmgr should store/get a pointer to a user-allocated mutex. 
It's\n * NULL upon AV_LOCK_CREATE and != NULL for all other ops.\n *\n * @param cb User defined callback. Note: FFmpeg may invoke calls to this\n *           callback during the call to av_lockmgr_register().\n *           Thus, the application must be prepared to handle that.\n *           If cb is set to NULL the lockmgr will be unregistered.\n *           Also note that during unregistration the previously registered\n *           lockmgr callback may also be invoked.\n */\nint av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));\n\n/**\n * Get the type of the given codec.\n */\nenum AVMediaType avcodec_get_type(enum AVCodecID codec_id);\n\n/**\n * Get the name of a codec.\n * @return  a static string identifying the codec; never NULL\n */\nconst char *avcodec_get_name(enum AVCodecID id);\n\n/**\n * @return a positive value if s is open (i.e. avcodec_open2() was called on it\n * with no corresponding avcodec_close()), 0 otherwise.\n */\nint avcodec_is_open(AVCodecContext *s);\n\n/**\n * @return a non-zero number if codec is an encoder, zero otherwise\n */\nint av_codec_is_encoder(const AVCodec *codec);\n\n/**\n * @return a non-zero number if codec is a decoder, zero otherwise\n */\nint av_codec_is_decoder(const AVCodec *codec);\n\n/**\n * @return descriptor for given codec ID or NULL if no descriptor exists.\n */\nconst AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);\n\n/**\n * Iterate over all codec descriptors known to libavcodec.\n *\n * @param prev previous descriptor. NULL to get the first descriptor.\n *\n * @return next descriptor or NULL after the last descriptor\n */\nconst AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);\n\n/**\n * @return codec descriptor with the given name or NULL if no such descriptor\n *         exists.\n */\nconst AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_AVCODEC_H */\n"
  },
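  {
    "path": "src/examples/encode_video2_sketch.c",
    "content": "/*\n * Editor's illustrative sketch -- NOT part of the original source tree.\n * Shows the avcodec_encode_video2() calling pattern described in the\n * doxygen comments of avcodec.h above: let the encoder allocate the\n * packet, check got_packet_ptr, free with av_free_packet(), and flush a\n * delayed encoder by passing frame == NULL. The context is assumed to\n * be already opened with avcodec_open2() and the frame already filled;\n * the function name is hypothetical.\n */\n#include <libavcodec/avcodec.h>\n\n/* Encode one frame (or flush with frame == NULL); 0 on success. */\nstatic int encode_step(AVCodecContext *ctx, const AVFrame *frame)\n{\n    AVPacket pkt;\n    int got_packet, ret;\n\n    av_init_packet(&pkt);\n    pkt.data = NULL; /* NULL data: the encoder allocates the buffer */\n    pkt.size = 0;\n\n    ret = avcodec_encode_video2(ctx, &pkt, frame, &got_packet);\n    if (ret < 0)\n        return ret; /* on failure, avpkt is freed by libavcodec */\n\n    if (got_packet) {\n        /* pkt.data now belongs to the caller; write it out, then: */\n        av_free_packet(&pkt);\n    }\n    return 0;\n}\n"
  },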
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/avfft.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_AVFFT_H\n#define AVCODEC_AVFFT_H\n\n/**\n * @file\n * @ingroup lavc_fft\n * FFT functions\n */\n\n/**\n * @defgroup lavc_fft FFT functions\n * @ingroup lavc_misc\n *\n * @{\n */\n\ntypedef float FFTSample;\n\ntypedef struct FFTComplex {\n    FFTSample re, im;\n} FFTComplex;\n\ntypedef struct FFTContext FFTContext;\n\n/**\n * Set up a complex FFT.\n * @param nbits           log2 of the length of the input array\n * @param inverse         if 0 perform the forward transform, if 1 perform the inverse\n */\nFFTContext *av_fft_init(int nbits, int inverse);\n\n/**\n * Do the permutation needed BEFORE calling ff_fft_calc().\n */\nvoid av_fft_permute(FFTContext *s, FFTComplex *z);\n\n/**\n * Do a complex FFT with the parameters defined in av_fft_init(). The\n * input data must be permuted before. No 1.0/sqrt(n) normalization is done.\n */\nvoid av_fft_calc(FFTContext *s, FFTComplex *z);\n\nvoid av_fft_end(FFTContext *s);\n\nFFTContext *av_mdct_init(int nbits, int inverse, double scale);\nvoid av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);\nvoid av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);\nvoid av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);\nvoid av_mdct_end(FFTContext *s);\n\n/* Real Discrete Fourier Transform */\n\nenum RDFTransformType {\n    DFT_R2C,\n    IDFT_C2R,\n    IDFT_R2C,\n    DFT_C2R,\n};\n\ntypedef struct RDFTContext RDFTContext;\n\n/**\n * Set up a real FFT.\n * @param nbits           log2 of the length of the input array\n * @param trans           the type of transform\n */\nRDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);\nvoid av_rdft_calc(RDFTContext *s, FFTSample *data);\nvoid av_rdft_end(RDFTContext *s);\n\n/* Discrete Cosine Transform */\n\ntypedef struct DCTContext DCTContext;\n\nenum DCTTransformType {\n    DCT_II = 0,\n    DCT_III,\n    DCT_I,\n    DST_I,\n};\n\n/**\n * Set up DCT.\n *\n * @param nbits           size of the input array:\n *                        (1 << nbits)     for DCT-II, DCT-III and DST-I\n *                        (1 << nbits) + 1 for DCT-I\n * @param type            the type of transform\n *\n * @note the first element of the input of DST-I is ignored\n */\nDCTContext *av_dct_init(int nbits, enum DCTTransformType type);\nvoid av_dct_calc(DCTContext *s, FFTSample *data);\nvoid av_dct_end (DCTContext *s);\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_AVFFT_H */\n"
  },
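  {
    "path": "src/examples/avfft_sketch.c",
    "content": "/*\n * Editor's illustrative sketch -- NOT part of the original source tree.\n * A forward complex FFT using the avfft.h API above. The transform\n * length (1 << 10) and the caller-filled input array are assumptions,\n * and the function name is hypothetical.\n */\n#include <libavcodec/avfft.h>\n\n/* In-place forward FFT of 1 << 10 complex samples; 0 on success. */\nstatic int fft_forward(FFTComplex *z)\n{\n    FFTContext *ctx = av_fft_init(10, 0); /* nbits = 10, inverse = 0 */\n    if (!ctx)\n        return -1;\n    av_fft_permute(ctx, z); /* permutation required before av_fft_calc() */\n    av_fft_calc(ctx, z);    /* no 1.0/sqrt(n) normalization is done */\n    av_fft_end(ctx);\n    return 0;\n}\n"
  },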
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/dxva2.h",
    "content": "/*\n * DXVA2 HW acceleration\n *\n * copyright (c) 2009 Laurent Aimar\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_DXVA_H\n#define AVCODEC_DXVA_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_dxva2\n * Public libavcodec DXVA2 header.\n */\n\n#if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x0600\n#undef _WIN32_WINNT\n#endif\n\n#if !defined(_WIN32_WINNT)\n#define _WIN32_WINNT 0x0600\n#endif\n\n#include <stdint.h>\n#include <d3d9.h>\n#include <dxva2api.h>\n\n/**\n * @defgroup lavc_codec_hwaccel_dxva2 DXVA2\n * @ingroup lavc_codec_hwaccel\n *\n * @{\n */\n\n#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards\n\n/**\n * This structure is used to provides the necessary configurations and data\n * to the DXVA2 FFmpeg HWAccel implementation.\n *\n * The application must make it available as AVCodecContext.hwaccel_context.\n */\nstruct dxva_context {\n    /**\n     * DXVA2 decoder object\n     */\n    IDirectXVideoDecoder *decoder;\n\n    /**\n     * DXVA2 configuration used to create the decoder\n     */\n    const DXVA2_ConfigPictureDecode *cfg;\n\n    /**\n     * The number of surface in the surface array\n     */\n    unsigned surface_count;\n\n    /**\n     * The array of Direct3D surfaces used to create the decoder\n     */\n    LPDIRECT3DSURFACE9 *surface;\n\n    /**\n     * A bit field configuring the workarounds needed for using the decoder\n     */\n    uint64_t workaround;\n\n    /**\n     * Private to the FFmpeg AVHWAccel implementation\n     */\n    unsigned report_id;\n};\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_DXVA_H */\n"
  },
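  {
    "path": "src/examples/dxva2_sketch.c",
    "content": "/*\n * Editor's illustrative sketch -- NOT part of the original source tree.\n * Wires an application-created DXVA2 decoder into libavcodec through\n * AVCodecContext.hwaccel_context, as dxva2.h above requires. The\n * decoder, config and surface array are assumed to come from the\n * application's D3D9/DXVA2 setup code; the function name is\n * hypothetical.\n */\n#include <libavcodec/avcodec.h>\n#include <libavcodec/dxva2.h>\n#include <libavutil/mem.h>\n\nstatic int attach_dxva2(AVCodecContext *avctx,\n                        IDirectXVideoDecoder *decoder,\n                        const DXVA2_ConfigPictureDecode *cfg,\n                        LPDIRECT3DSURFACE9 *surfaces, unsigned count)\n{\n    struct dxva_context *dxva = av_mallocz(sizeof(*dxva));\n    if (!dxva)\n        return -1;\n    dxva->decoder       = decoder;\n    dxva->cfg           = cfg;\n    dxva->surface       = surfaces;\n    dxva->surface_count = count;\n    avctx->hwaccel_context = dxva; /* must stay valid while decoding */\n    return 0;\n}\n"
  },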
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/old_codec_ids.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_OLD_CODEC_IDS_H\n#define AVCODEC_OLD_CODEC_IDS_H\n\n#include \"libavutil/common.h\"\n\n/*\n * This header exists to prevent new codec IDs from being accidentally added to\n * the deprecated list.\n * Do not include it directly. It will be removed on next major bump\n *\n * Do not add new items to this list. Use the AVCodecID enum instead.\n */\n\n    CODEC_ID_NONE = AV_CODEC_ID_NONE,\n\n    /* video codecs */\n    CODEC_ID_MPEG1VIDEO,\n    CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding\n#if FF_API_XVMC\n    CODEC_ID_MPEG2VIDEO_XVMC,\n#endif\n    CODEC_ID_H261,\n    CODEC_ID_H263,\n    CODEC_ID_RV10,\n    CODEC_ID_RV20,\n    CODEC_ID_MJPEG,\n    CODEC_ID_MJPEGB,\n    CODEC_ID_LJPEG,\n    CODEC_ID_SP5X,\n    CODEC_ID_JPEGLS,\n    CODEC_ID_MPEG4,\n    CODEC_ID_RAWVIDEO,\n    CODEC_ID_MSMPEG4V1,\n    CODEC_ID_MSMPEG4V2,\n    CODEC_ID_MSMPEG4V3,\n    CODEC_ID_WMV1,\n    CODEC_ID_WMV2,\n    CODEC_ID_H263P,\n    CODEC_ID_H263I,\n    CODEC_ID_FLV1,\n    CODEC_ID_SVQ1,\n    CODEC_ID_SVQ3,\n    CODEC_ID_DVVIDEO,\n    CODEC_ID_HUFFYUV,\n    CODEC_ID_CYUV,\n    CODEC_ID_H264,\n    CODEC_ID_INDEO3,\n    CODEC_ID_VP3,\n    CODEC_ID_THEORA,\n    CODEC_ID_ASV1,\n    CODEC_ID_ASV2,\n    CODEC_ID_FFV1,\n    CODEC_ID_4XM,\n    CODEC_ID_VCR1,\n    CODEC_ID_CLJR,\n    CODEC_ID_MDEC,\n    CODEC_ID_ROQ,\n    CODEC_ID_INTERPLAY_VIDEO,\n    CODEC_ID_XAN_WC3,\n    CODEC_ID_XAN_WC4,\n    CODEC_ID_RPZA,\n    CODEC_ID_CINEPAK,\n    CODEC_ID_WS_VQA,\n    CODEC_ID_MSRLE,\n    CODEC_ID_MSVIDEO1,\n    CODEC_ID_IDCIN,\n    CODEC_ID_8BPS,\n    CODEC_ID_SMC,\n    CODEC_ID_FLIC,\n    CODEC_ID_TRUEMOTION1,\n    CODEC_ID_VMDVIDEO,\n    CODEC_ID_MSZH,\n    CODEC_ID_ZLIB,\n    CODEC_ID_QTRLE,\n    CODEC_ID_TSCC,\n    CODEC_ID_ULTI,\n    CODEC_ID_QDRAW,\n    CODEC_ID_VIXL,\n    CODEC_ID_QPEG,\n    CODEC_ID_PNG,\n    CODEC_ID_PPM,\n    CODEC_ID_PBM,\n    CODEC_ID_PGM,\n    CODEC_ID_PGMYUV,\n    CODEC_ID_PAM,\n    CODEC_ID_FFVHUFF,\n    CODEC_ID_RV30,\n    CODEC_ID_RV40,\n    CODEC_ID_VC1,\n    CODEC_ID_WMV3,\n    CODEC_ID_LOCO,\n    CODEC_ID_WNV1,\n    CODEC_ID_AASC,\n    CODEC_ID_INDEO2,\n    CODEC_ID_FRAPS,\n    CODEC_ID_TRUEMOTION2,\n    CODEC_ID_BMP,\n    CODEC_ID_CSCD,\n    CODEC_ID_MMVIDEO,\n    CODEC_ID_ZMBV,\n    CODEC_ID_AVS,\n    CODEC_ID_SMACKVIDEO,\n    CODEC_ID_NUV,\n    CODEC_ID_KMVC,\n    CODEC_ID_FLASHSV,\n    CODEC_ID_CAVS,\n    CODEC_ID_JPEG2000,\n    CODEC_ID_VMNC,\n    CODEC_ID_VP5,\n    CODEC_ID_VP6,\n    CODEC_ID_VP6F,\n    CODEC_ID_TARGA,\n    CODEC_ID_DSICINVIDEO,\n    CODEC_ID_TIERTEXSEQVIDEO,\n    CODEC_ID_TIFF,\n    CODEC_ID_GIF,\n    CODEC_ID_DXA,\n    CODEC_ID_DNXHD,\n    CODEC_ID_THP,\n    CODEC_ID_SGI,\n    CODEC_ID_C93,\n    CODEC_ID_BETHSOFTVID,\n    CODEC_ID_PTX,\n    
CODEC_ID_TXD,\n    CODEC_ID_VP6A,\n    CODEC_ID_AMV,\n    CODEC_ID_VB,\n    CODEC_ID_PCX,\n    CODEC_ID_SUNRAST,\n    CODEC_ID_INDEO4,\n    CODEC_ID_INDEO5,\n    CODEC_ID_MIMIC,\n    CODEC_ID_RL2,\n    CODEC_ID_ESCAPE124,\n    CODEC_ID_DIRAC,\n    CODEC_ID_BFI,\n    CODEC_ID_CMV,\n    CODEC_ID_MOTIONPIXELS,\n    CODEC_ID_TGV,\n    CODEC_ID_TGQ,\n    CODEC_ID_TQI,\n    CODEC_ID_AURA,\n    CODEC_ID_AURA2,\n    CODEC_ID_V210X,\n    CODEC_ID_TMV,\n    CODEC_ID_V210,\n    CODEC_ID_DPX,\n    CODEC_ID_MAD,\n    CODEC_ID_FRWU,\n    CODEC_ID_FLASHSV2,\n    CODEC_ID_CDGRAPHICS,\n    CODEC_ID_R210,\n    CODEC_ID_ANM,\n    CODEC_ID_BINKVIDEO,\n    CODEC_ID_IFF_ILBM,\n    CODEC_ID_IFF_BYTERUN1,\n    CODEC_ID_KGV1,\n    CODEC_ID_YOP,\n    CODEC_ID_VP8,\n    CODEC_ID_PICTOR,\n    CODEC_ID_ANSI,\n    CODEC_ID_A64_MULTI,\n    CODEC_ID_A64_MULTI5,\n    CODEC_ID_R10K,\n    CODEC_ID_MXPEG,\n    CODEC_ID_LAGARITH,\n    CODEC_ID_PRORES,\n    CODEC_ID_JV,\n    CODEC_ID_DFA,\n    CODEC_ID_WMV3IMAGE,\n    CODEC_ID_VC1IMAGE,\n    CODEC_ID_UTVIDEO,\n    CODEC_ID_BMV_VIDEO,\n    CODEC_ID_VBLE,\n    CODEC_ID_DXTORY,\n    CODEC_ID_V410,\n    CODEC_ID_XWD,\n    CODEC_ID_CDXL,\n    CODEC_ID_XBM,\n    CODEC_ID_ZEROCODEC,\n    CODEC_ID_MSS1,\n    CODEC_ID_MSA1,\n    CODEC_ID_TSCC2,\n    CODEC_ID_MTS2,\n    CODEC_ID_CLLC,\n    CODEC_ID_Y41P       = MKBETAG('Y','4','1','P'),\n    CODEC_ID_ESCAPE130  = MKBETAG('E','1','3','0'),\n    CODEC_ID_EXR        = MKBETAG('0','E','X','R'),\n    CODEC_ID_AVRP       = MKBETAG('A','V','R','P'),\n\n    CODEC_ID_G2M        = MKBETAG( 0 ,'G','2','M'),\n    CODEC_ID_AVUI       = MKBETAG('A','V','U','I'),\n    CODEC_ID_AYUV       = MKBETAG('A','Y','U','V'),\n    CODEC_ID_V308       = MKBETAG('V','3','0','8'),\n    CODEC_ID_V408       = MKBETAG('V','4','0','8'),\n    CODEC_ID_YUV4       = MKBETAG('Y','U','V','4'),\n    CODEC_ID_SANM       = MKBETAG('S','A','N','M'),\n    CODEC_ID_PAF_VIDEO  = MKBETAG('P','A','F','V'),\n    CODEC_ID_SNOW       = AV_CODEC_ID_SNOW,\n\n    /* various PCM \"codecs\" */\n    CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs\n    CODEC_ID_PCM_S16LE = 0x10000,\n    CODEC_ID_PCM_S16BE,\n    CODEC_ID_PCM_U16LE,\n    CODEC_ID_PCM_U16BE,\n    CODEC_ID_PCM_S8,\n    CODEC_ID_PCM_U8,\n    CODEC_ID_PCM_MULAW,\n    CODEC_ID_PCM_ALAW,\n    CODEC_ID_PCM_S32LE,\n    CODEC_ID_PCM_S32BE,\n    CODEC_ID_PCM_U32LE,\n    CODEC_ID_PCM_U32BE,\n    CODEC_ID_PCM_S24LE,\n    CODEC_ID_PCM_S24BE,\n    CODEC_ID_PCM_U24LE,\n    CODEC_ID_PCM_U24BE,\n    CODEC_ID_PCM_S24DAUD,\n    CODEC_ID_PCM_ZORK,\n    CODEC_ID_PCM_S16LE_PLANAR,\n    CODEC_ID_PCM_DVD,\n    CODEC_ID_PCM_F32BE,\n    CODEC_ID_PCM_F32LE,\n    CODEC_ID_PCM_F64BE,\n    CODEC_ID_PCM_F64LE,\n    CODEC_ID_PCM_BLURAY,\n    CODEC_ID_PCM_LXF,\n    CODEC_ID_S302M,\n    CODEC_ID_PCM_S8_PLANAR,\n\n    /* various ADPCM codecs */\n    CODEC_ID_ADPCM_IMA_QT = 0x11000,\n    CODEC_ID_ADPCM_IMA_WAV,\n    CODEC_ID_ADPCM_IMA_DK3,\n    CODEC_ID_ADPCM_IMA_DK4,\n    CODEC_ID_ADPCM_IMA_WS,\n    CODEC_ID_ADPCM_IMA_SMJPEG,\n    CODEC_ID_ADPCM_MS,\n    CODEC_ID_ADPCM_4XM,\n    CODEC_ID_ADPCM_XA,\n    CODEC_ID_ADPCM_ADX,\n    CODEC_ID_ADPCM_EA,\n    CODEC_ID_ADPCM_G726,\n    CODEC_ID_ADPCM_CT,\n    CODEC_ID_ADPCM_SWF,\n    CODEC_ID_ADPCM_YAMAHA,\n    CODEC_ID_ADPCM_SBPRO_4,\n    CODEC_ID_ADPCM_SBPRO_3,\n    CODEC_ID_ADPCM_SBPRO_2,\n    CODEC_ID_ADPCM_THP,\n    CODEC_ID_ADPCM_IMA_AMV,\n    CODEC_ID_ADPCM_EA_R1,\n    CODEC_ID_ADPCM_EA_R3,\n    CODEC_ID_ADPCM_EA_R2,\n    CODEC_ID_ADPCM_IMA_EA_SEAD,\n    
CODEC_ID_ADPCM_IMA_EA_EACS,\n    CODEC_ID_ADPCM_EA_XAS,\n    CODEC_ID_ADPCM_EA_MAXIS_XA,\n    CODEC_ID_ADPCM_IMA_ISS,\n    CODEC_ID_ADPCM_G722,\n    CODEC_ID_ADPCM_IMA_APC,\n    CODEC_ID_VIMA       = MKBETAG('V','I','M','A'),\n\n    /* AMR */\n    CODEC_ID_AMR_NB = 0x12000,\n    CODEC_ID_AMR_WB,\n\n    /* RealAudio codecs*/\n    CODEC_ID_RA_144 = 0x13000,\n    CODEC_ID_RA_288,\n\n    /* various DPCM codecs */\n    CODEC_ID_ROQ_DPCM = 0x14000,\n    CODEC_ID_INTERPLAY_DPCM,\n    CODEC_ID_XAN_DPCM,\n    CODEC_ID_SOL_DPCM,\n\n    /* audio codecs */\n    CODEC_ID_MP2 = 0x15000,\n    CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3\n    CODEC_ID_AAC,\n    CODEC_ID_AC3,\n    CODEC_ID_DTS,\n    CODEC_ID_VORBIS,\n    CODEC_ID_DVAUDIO,\n    CODEC_ID_WMAV1,\n    CODEC_ID_WMAV2,\n    CODEC_ID_MACE3,\n    CODEC_ID_MACE6,\n    CODEC_ID_VMDAUDIO,\n    CODEC_ID_FLAC,\n    CODEC_ID_MP3ADU,\n    CODEC_ID_MP3ON4,\n    CODEC_ID_SHORTEN,\n    CODEC_ID_ALAC,\n    CODEC_ID_WESTWOOD_SND1,\n    CODEC_ID_GSM, ///< as in Berlin toast format\n    CODEC_ID_QDM2,\n    CODEC_ID_COOK,\n    CODEC_ID_TRUESPEECH,\n    CODEC_ID_TTA,\n    CODEC_ID_SMACKAUDIO,\n    CODEC_ID_QCELP,\n    CODEC_ID_WAVPACK,\n    CODEC_ID_DSICINAUDIO,\n    CODEC_ID_IMC,\n    CODEC_ID_MUSEPACK7,\n    CODEC_ID_MLP,\n    CODEC_ID_GSM_MS, /* as found in WAV */\n    CODEC_ID_ATRAC3,\n    CODEC_ID_VOXWARE,\n    CODEC_ID_APE,\n    CODEC_ID_NELLYMOSER,\n    CODEC_ID_MUSEPACK8,\n    CODEC_ID_SPEEX,\n    CODEC_ID_WMAVOICE,\n    CODEC_ID_WMAPRO,\n    CODEC_ID_WMALOSSLESS,\n    CODEC_ID_ATRAC3P,\n    CODEC_ID_EAC3,\n    CODEC_ID_SIPR,\n    CODEC_ID_MP1,\n    CODEC_ID_TWINVQ,\n    CODEC_ID_TRUEHD,\n    CODEC_ID_MP4ALS,\n    CODEC_ID_ATRAC1,\n    CODEC_ID_BINKAUDIO_RDFT,\n    CODEC_ID_BINKAUDIO_DCT,\n    CODEC_ID_AAC_LATM,\n    CODEC_ID_QDMC,\n    CODEC_ID_CELT,\n    CODEC_ID_G723_1,\n    CODEC_ID_G729,\n    CODEC_ID_8SVX_EXP,\n    CODEC_ID_8SVX_FIB,\n    CODEC_ID_BMV_AUDIO,\n    CODEC_ID_RALF,\n    CODEC_ID_IAC,\n    CODEC_ID_ILBC,\n    CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),\n    CODEC_ID_SONIC       = MKBETAG('S','O','N','C'),\n    CODEC_ID_SONIC_LS    = MKBETAG('S','O','N','L'),\n    CODEC_ID_PAF_AUDIO   = MKBETAG('P','A','F','A'),\n    CODEC_ID_OPUS        = MKBETAG('O','P','U','S'),\n\n    /* subtitle codecs */\n    CODEC_ID_FIRST_SUBTITLE = 0x17000,          ///< A dummy ID pointing at the start of subtitle codecs.\n    CODEC_ID_DVD_SUBTITLE = 0x17000,\n    CODEC_ID_DVB_SUBTITLE,\n    CODEC_ID_TEXT,  ///< raw UTF-8 text\n    CODEC_ID_XSUB,\n    CODEC_ID_SSA,\n    CODEC_ID_MOV_TEXT,\n    CODEC_ID_HDMV_PGS_SUBTITLE,\n    CODEC_ID_DVB_TELETEXT,\n    CODEC_ID_SRT,\n    CODEC_ID_MICRODVD   = MKBETAG('m','D','V','D'),\n    CODEC_ID_EIA_608    = MKBETAG('c','6','0','8'),\n    CODEC_ID_JACOSUB    = MKBETAG('J','S','U','B'),\n    CODEC_ID_SAMI       = MKBETAG('S','A','M','I'),\n    CODEC_ID_REALTEXT   = MKBETAG('R','T','X','T'),\n    CODEC_ID_SUBVIEWER  = MKBETAG('S','u','b','V'),\n\n    /* other specific kind of codecs (generally used for attachments) */\n    CODEC_ID_FIRST_UNKNOWN = 0x18000,           ///< A dummy ID pointing at the start of various fake codecs.\n    CODEC_ID_TTF = 0x18000,\n    CODEC_ID_BINTEXT    = MKBETAG('B','T','X','T'),\n    CODEC_ID_XBIN       = MKBETAG('X','B','I','N'),\n    CODEC_ID_IDF        = MKBETAG( 0 ,'I','D','F'),\n    CODEC_ID_OTF        = MKBETAG( 0 ,'O','T','F'),\n\n    CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it\n\n    
CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS\n                                * stream (only used by libavformat) */\n    CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems\n                                * stream (only used by libavformat) */\n    CODEC_ID_FFMETADATA = 0x21000,   ///< Dummy codec for streams containing only metadata information.\n\n#endif /* AVCODEC_OLD_CODEC_IDS_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/vaapi.h",
    "content": "/*\n * Video Acceleration API (shared data between FFmpeg and the video player)\n * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1\n *\n * Copyright (C) 2008-2009 Splitted-Desktop Systems\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VAAPI_H\n#define AVCODEC_VAAPI_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_vaapi\n * Public libavcodec VA API header.\n */\n\n#include <stdint.h>\n\n/**\n * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding\n * @ingroup lavc_codec_hwaccel\n * @{\n */\n\n/**\n * This structure is used to share data between the FFmpeg library and\n * the client video application.\n * This shall be zero-allocated and available as\n * AVCodecContext.hwaccel_context. All user members can be set once\n * during initialization or through each AVCodecContext.get_buffer()\n * function call. In any case, they must be valid prior to calling\n * decoding functions.\n */\nstruct vaapi_context {\n    /**\n     * Window system dependent data\n     *\n     * - encoding: unused\n     * - decoding: Set by user\n     */\n    void *display;\n\n    /**\n     * Configuration ID\n     *\n     * - encoding: unused\n     * - decoding: Set by user\n     */\n    uint32_t config_id;\n\n    /**\n     * Context ID (video decode pipeline)\n     *\n     * - encoding: unused\n     * - decoding: Set by user\n     */\n    uint32_t context_id;\n\n    /**\n     * VAPictureParameterBuffer ID\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t pic_param_buf_id;\n\n    /**\n     * VAIQMatrixBuffer ID\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t iq_matrix_buf_id;\n\n    /**\n     * VABitPlaneBuffer ID (for VC-1 decoding)\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t bitplane_buf_id;\n\n    /**\n     * Slice parameter/data buffer IDs\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t *slice_buf_ids;\n\n    /**\n     * Number of effective slice buffer IDs to send to the HW\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int n_slice_buf_ids;\n\n    /**\n     * Size of pre-allocated slice_buf_ids\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int slice_buf_ids_alloc;\n\n    /**\n     * Pointer to VASliceParameterBuffers\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    void *slice_params;\n\n    /**\n     * Size of a VASliceParameterBuffer element\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int slice_param_size;\n\n    /**\n     * Size of pre-allocated slice_params\n     *\n     
* - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int slice_params_alloc;\n\n    /**\n     * Number of slices currently filled in\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int slice_count;\n\n    /**\n     * Pointer to slice data buffer base\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    const uint8_t *slice_data;\n\n    /**\n     * Current size of slice data\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t slice_data_size;\n};\n\n/* @} */\n\n#endif /* AVCODEC_VAAPI_H */\n"
  },
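  {
    "path": "src/examples/vaapi_sketch.c",
    "content": "/*\n * Editor's illustrative sketch -- NOT part of the original source tree.\n * Fills the three user-set members of struct vaapi_context, which\n * vaapi.h above says must be zero-allocated and exposed through\n * AVCodecContext.hwaccel_context. The display/config/context IDs are\n * assumed to come from the application's libva setup; the function\n * name is hypothetical.\n */\n#include <libavcodec/avcodec.h>\n#include <libavcodec/vaapi.h>\n#include <libavutil/mem.h>\n\nstatic int attach_vaapi(AVCodecContext *avctx, void *va_display,\n                        uint32_t config_id, uint32_t context_id)\n{\n    struct vaapi_context *va = av_mallocz(sizeof(*va)); /* zero-allocated */\n    if (!va)\n        return -1;\n    va->display    = va_display; /* window system dependent data */\n    va->config_id  = config_id;\n    va->context_id = context_id; /* remaining members set by libavcodec */\n    avctx->hwaccel_context = va;\n    return 0;\n}\n"
  },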
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/vda.h",
    "content": "/*\n * VDA HW acceleration\n *\n * copyright (c) 2011 Sebastien Zwickert\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VDA_H\n#define AVCODEC_VDA_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_vda\n * Public libavcodec VDA header.\n */\n\n#include <stdint.h>\n\n// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes\n// http://openradar.appspot.com/8026390\n#undef __GNUC_STDC_INLINE__\n\n#define Picture QuickdrawPicture\n#include <VideoDecodeAcceleration/VDADecoder.h>\n#undef Picture\n\n#include \"libavcodec/version.h\"\n\n// extra flags not defined in VDADecoder.h\nenum {\n    kVDADecodeInfo_Asynchronous = 1UL << 0,\n    kVDADecodeInfo_FrameDropped = 1UL << 1\n};\n\n/**\n * @defgroup lavc_codec_hwaccel_vda VDA\n * @ingroup lavc_codec_hwaccel\n *\n * @{\n */\n\n/**\n * This structure is used to provide the necessary configurations and data\n * to the VDA FFmpeg HWAccel implementation.\n *\n * The application must make it available as AVCodecContext.hwaccel_context.\n */\nstruct vda_context {\n    /**\n     * VDA decoder object.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by libavcodec.\n     */\n    VDADecoder          decoder;\n\n    /**\n     * The Core Video pixel buffer that contains the current image data.\n     *\n     * encoding: unused\n     * decoding: Set by libavcodec. 
Unset by user.\n     */\n    CVPixelBufferRef    cv_buffer;\n\n    /**\n     * Use the hardware decoder in synchronous mode.\n     *\n     * encoding: unused\n     * decoding: Set by user.\n     */\n    int                 use_sync_decoding;\n\n    /**\n     * The frame width.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    int                 width;\n\n    /**\n     * The frame height.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    int                 height;\n\n    /**\n     * The frame format.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    int                 format;\n\n    /**\n     * The pixel format for output image buffers.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    OSType              cv_pix_fmt_type;\n\n    /**\n     * The current bitstream buffer.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by libavcodec.\n     */\n    uint8_t             *priv_bitstream;\n\n    /**\n     * The current size of the bitstream.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by libavcodec.\n     */\n    int                 priv_bitstream_size;\n\n    /**\n     * The reference size used for fast reallocation.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by libavcodec.\n     */\n    int                 priv_allocated_size;\n\n    /**\n     * Use av_buffer to manage buffer.\n     * When the flag is set, the CVPixelBuffers returned by the decoder will\n     * be released automatically, so you have to retain them if necessary.\n     * Not setting this flag may cause memory leak.\n     *\n     * encoding: unused\n     * decoding: Set by user.\n     */\n    int                 use_ref_buffer;\n};\n\n/** Create the video decoder. */\nint ff_vda_create_decoder(struct vda_context *vda_ctx,\n                          uint8_t *extradata,\n                          int extradata_size);\n\n/** Destroy the video decoder. */\nint ff_vda_destroy_decoder(struct vda_context *vda_ctx);\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_VDA_H */\n"
  },
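  {
    "path": "src/examples/vda_sketch.c",
    "content": "/*\n * Editor's illustrative sketch -- NOT part of the original source tree.\n * Fills the user-set members of struct vda_context and creates the\n * decoder, as described in vda.h above. The 'avc1' source format, the\n * kCVPixelFormatType_422YpCbCr8 output format (from CoreVideo) and the\n * stream-derived extradata/dimensions are assumptions for this sketch;\n * the function name is hypothetical.\n */\n#include <libavcodec/vda.h>\n\nstatic int setup_vda(struct vda_context *vda_ctx, int width, int height,\n                     uint8_t *extradata, int extradata_size)\n{\n    vda_ctx->width             = width;\n    vda_ctx->height            = height;\n    vda_ctx->format            = 'avc1'; /* H.264 bitstream */\n    vda_ctx->cv_pix_fmt_type   = kCVPixelFormatType_422YpCbCr8;\n    vda_ctx->use_sync_decoding = 1; /* synchronous mode */\n    return ff_vda_create_decoder(vda_ctx, extradata, extradata_size);\n}\n"
  },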
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/vdpau.h",
    "content": "/*\n * The Video Decode and Presentation API for UNIX (VDPAU) is used for\n * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.\n *\n * Copyright (C) 2008 NVIDIA\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VDPAU_H\n#define AVCODEC_VDPAU_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_vdpau\n * Public libavcodec VDPAU header.\n */\n\n\n/**\n * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer\n * @ingroup lavc_codec_hwaccel\n *\n * VDPAU hardware acceleration has two modules\n * - VDPAU decoding\n * - VDPAU presentation\n *\n * The VDPAU decoding module parses all headers using FFmpeg\n * parsing mechanisms and uses VDPAU for the actual decoding.\n *\n * As per the current implementation, the actual decoding\n * and rendering (API calls) are done as part of the VDPAU\n * presentation (vo_vdpau.c) module.\n *\n * @{\n */\n\n#include <vdpau/vdpau.h>\n#include <vdpau/vdpau_x11.h>\n#include \"libavutil/avconfig.h\"\n#include \"libavutil/attributes.h\"\n\n#include \"avcodec.h\"\n#include \"version.h\"\n\n#if FF_API_BUFS_VDPAU\nunion AVVDPAUPictureInfo {\n    VdpPictureInfoH264        h264;\n    VdpPictureInfoMPEG1Or2    mpeg;\n    VdpPictureInfoVC1          vc1;\n    VdpPictureInfoMPEG4Part2 mpeg4;\n};\n#endif\n\nstruct AVCodecContext;\nstruct AVFrame;\n\ntypedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *,\n                               const VdpPictureInfo *, uint32_t,\n                               const VdpBitstreamBuffer *);\n\n/**\n * This structure is used to share data between the libavcodec library and\n * the client video application.\n * The user shall allocate the structure via the av_alloc_vdpau_hwaccel\n * function and make it available as\n * AVCodecContext.hwaccel_context. Members can be set by the user once\n * during initialization or through each AVCodecContext.get_buffer()\n * function call. In any case, they must be valid prior to calling\n * decoding functions.\n *\n * The size of this structure is not a part of the public ABI and must not\n * be used outside of libavcodec. 
Use av_vdpau_alloc_context() to allocate an\n * AVVDPAUContext.\n */\ntypedef struct AVVDPAUContext {\n    /**\n     * VDPAU decoder handle\n     *\n     * Set by user.\n     */\n    VdpDecoder decoder;\n\n    /**\n     * VDPAU decoder render callback\n     *\n     * Set by the user.\n     */\n    VdpDecoderRender *render;\n\n#if FF_API_BUFS_VDPAU\n    /**\n     * VDPAU picture information\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    union AVVDPAUPictureInfo info;\n\n    /**\n     * Allocated size of the bitstream_buffers table.\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    int bitstream_buffers_allocated;\n\n    /**\n     * Useful bitstream buffers in the bitstream buffers table.\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    int bitstream_buffers_used;\n\n   /**\n     * Table of bitstream buffers.\n     * The user is responsible for freeing this buffer using av_freep().\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    VdpBitstreamBuffer *bitstream_buffers;\n#endif\n    AVVDPAU_Render2 render2;\n} AVVDPAUContext;\n\n/**\n * @brief allocation function for AVVDPAUContext\n *\n * Allows extending the struct without breaking API/ABI\n */\nAVVDPAUContext *av_alloc_vdpaucontext(void);\n\nAVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *);\nvoid av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2);\n\n/**\n * Allocate an AVVDPAUContext.\n *\n * @return Newly-allocated AVVDPAUContext or NULL on failure.\n */\nAVVDPAUContext *av_vdpau_alloc_context(void);\n\n/**\n * Get a decoder profile that should be used for initializing a VDPAU decoder.\n * Should be called from the AVCodecContext.get_format() callback.\n *\n * @param avctx the codec context being used for decoding the stream\n * @param profile a pointer into which the result will be written on success.\n *                The contents of profile are undefined if this function returns\n *                an error.\n *\n * @return 0 on success (non-negative), a negative AVERROR on failure.\n */\nint av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);\n\n#if FF_API_CAP_VDPAU\n/** @brief The videoSurface is used for rendering. */\n#define FF_VDPAU_STATE_USED_FOR_RENDER 1\n\n/**\n * @brief The videoSurface is needed for reference/prediction.\n * The codec manipulates this.\n */\n#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2\n\n/**\n * @brief This structure is used as a callback between the FFmpeg\n * decoder (vd_) and presentation (vo_) module.\n * This is used for defining a video frame containing surface,\n * picture parameter, bitstream information etc which are passed\n * between the FFmpeg decoder and its clients.\n */\nstruct vdpau_render_state {\n    VdpVideoSurface surface; ///< Used as rendered surface, never changed.\n\n    int state; ///< Holds FF_VDPAU_STATE_* values.\n\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI\n    /** picture parameter information for all supported codecs */\n    union AVVDPAUPictureInfo info;\n#endif\n\n    /** Describe size/location of the compressed video data.\n        Set to 0 when freeing bitstream_buffers. */\n    int bitstream_buffers_allocated;\n    int bitstream_buffers_used;\n    /** The user is responsible for freeing this buffer using av_freep(). 
*/\n    VdpBitstreamBuffer *bitstream_buffers;\n\n#if !AV_HAVE_INCOMPATIBLE_LIBAV_ABI\n    /** picture parameter information for all supported codecs */\n    union AVVDPAUPictureInfo info;\n#endif\n};\n#endif\n\n/* @}*/\n\n#endif /* AVCODEC_VDPAU_H */\n"
  },
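  {
    "path": "docs/examples/vdpau_context_sketch.c",
    "content": "/*\n * Editor-added illustrative sketch -- NOT part of the original tree.\n * Demonstrates the allocation rule stated in libavcodec/vdpau.h: since\n * the size of AVVDPAUContext is not public ABI, it must come from\n * av_vdpau_alloc_context() and is handed to the decoder through\n * AVCodecContext.hwaccel_context. my_decoder and my_render stand in\n * for handles the application is assumed to have created with the\n * VDPAU API itself.\n */\n#include \"libavcodec/vdpau.h\"\n\nstatic int attach_vdpau(AVCodecContext *avctx,\n                        VdpDecoder my_decoder,\n                        VdpDecoderRender *my_render)\n{\n    VdpDecoderProfile profile;\n    AVVDPAUContext *hwctx;\n\n    /* Per the header, av_vdpau_get_profile() is meant to be called from\n     * the AVCodecContext.get_format() callback. */\n    if (av_vdpau_get_profile(avctx, &profile) < 0)\n        return -1;\n\n    hwctx = av_vdpau_alloc_context();\n    if (!hwctx)\n        return -1;\n    hwctx->decoder = my_decoder; /* \"Set by user\", per the field docs */\n    hwctx->render  = my_render;  /* render callback, also set by the user */\n    avctx->hwaccel_context = hwctx;\n    return 0;\n}\n"
  },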
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/version.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VERSION_H\n#define AVCODEC_VERSION_H\n\n/**\n * @file\n * @ingroup libavc\n * Libavcodec version macros.\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVCODEC_VERSION_MAJOR 55\n#define LIBAVCODEC_VERSION_MINOR  52\n#define LIBAVCODEC_VERSION_MICRO 102\n\n#define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \\\n                                               LIBAVCODEC_VERSION_MINOR, \\\n                                               LIBAVCODEC_VERSION_MICRO)\n#define LIBAVCODEC_VERSION      AV_VERSION(LIBAVCODEC_VERSION_MAJOR,    \\\n                                           LIBAVCODEC_VERSION_MINOR,    \\\n                                           LIBAVCODEC_VERSION_MICRO)\n#define LIBAVCODEC_BUILD        LIBAVCODEC_VERSION_INT\n\n#define LIBAVCODEC_IDENT        \"Lavc\" AV_STRINGIFY(LIBAVCODEC_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. 
The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#ifndef FF_API_REQUEST_CHANNELS\n#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_OLD_DECODE_AUDIO\n#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_OLD_ENCODE_AUDIO\n#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_OLD_ENCODE_VIDEO\n#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_CODEC_ID\n#define FF_API_CODEC_ID          (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_AUDIO_CONVERT\n#define FF_API_AUDIO_CONVERT     (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_AVCODEC_RESAMPLE\n#define FF_API_AVCODEC_RESAMPLE  FF_API_AUDIO_CONVERT\n#endif\n#ifndef FF_API_DEINTERLACE\n#define FF_API_DEINTERLACE       (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_DESTRUCT_PACKET\n#define FF_API_DESTRUCT_PACKET   (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_GET_BUFFER\n#define FF_API_GET_BUFFER        (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_MISSING_SAMPLE\n#define FF_API_MISSING_SAMPLE    (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_LOWRES\n#define FF_API_LOWRES            (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_CAP_VDPAU\n#define FF_API_CAP_VDPAU         (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_BUFS_VDPAU\n#define FF_API_BUFS_VDPAU        (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_VOXWARE\n#define FF_API_VOXWARE           (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_SET_DIMENSIONS\n#define FF_API_SET_DIMENSIONS    (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_DEBUG_MV\n#define FF_API_DEBUG_MV          (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_AC_VLC\n#define FF_API_AC_VLC            (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_OLD_MSMPEG4\n#define FF_API_OLD_MSMPEG4       (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_ASPECT_EXTENDED\n#define FF_API_ASPECT_EXTENDED   (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_THREAD_OPAQUE\n#define FF_API_THREAD_OPAQUE     (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_CODEC_PKT\n#define FF_API_CODEC_PKT         (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_ARCH_ALPHA\n#define FF_API_ARCH_ALPHA        (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_XVMC\n#define FF_API_XVMC              (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_ERROR_RATE\n#define FF_API_ERROR_RATE        (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_QSCALE_TYPE\n#define FF_API_QSCALE_TYPE       (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_MB_TYPE\n#define FF_API_MB_TYPE           (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_MAX_BFRAMES\n#define FF_API_MAX_BFRAMES       (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_FAST_MALLOC\n#define FF_API_FAST_MALLOC       (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_NEG_LINESIZES\n#define FF_API_NEG_LINESIZES     (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_EMU_EDGE\n#define FF_API_EMU_EDGE          (LIBAVCODEC_VERSION_MAJOR < 56)\n#endif\n\n#endif /* AVCODEC_VERSION_H */\n"
  },
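  {
    "path": "docs/examples/lavc_version_check_sketch.c",
    "content": "/*\n * Editor-added illustrative sketch -- NOT part of the original tree.\n * Shows the usual consumption pattern for libavcodec/version.h: report\n * the compile-time version and gate code paths on\n * LIBAVCODEC_VERSION_INT / FF_API_* exactly as the header intends.\n */\n#include <stdio.h>\n#include \"libavcodec/version.h\"\n\nint main(void)\n{\n    /* Baked in at compile time; 55.52.102 for the bundled headers. */\n    printf(\"compiled against lavc %d.%d.%d\\n\",\n           LIBAVCODEC_VERSION_MAJOR,\n           LIBAVCODEC_VERSION_MINOR,\n           LIBAVCODEC_VERSION_MICRO);\n\n#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55, 52, 0)\n    /* Code relying on APIs present in this bundled version goes here. */\n#endif\n\n#if FF_API_REQUEST_CHANNELS\n    /* Deprecated paths are still compiled in while MAJOR < 56. */\n#endif\n    return 0;\n}\n"
  },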
  {
    "path": "src/3rdparty/ffmpeg/include/libavcodec/xvmc.h",
    "content": "/*\n * Copyright (C) 2003 Ivan Kalvachev\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_XVMC_H\n#define AVCODEC_XVMC_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_xvmc\n * Public libavcodec XvMC header.\n */\n\n#include <X11/extensions/XvMC.h>\n\n#include \"libavutil/attributes.h\"\n#include \"version.h\"\n#include \"avcodec.h\"\n\n/**\n * @defgroup lavc_codec_hwaccel_xvmc XvMC\n * @ingroup lavc_codec_hwaccel\n *\n * @{\n */\n\n#define AV_XVMC_ID                    0x1DC711C0  /**< special value to ensure that regular pixel routines haven't corrupted the struct\n                                                       the number is 1337 speak for the letters IDCT MCo (motion compensation) */\n\nattribute_deprecated struct xvmc_pix_fmt {\n    /** The field contains the special constant value AV_XVMC_ID.\n        It is used as a test that the application correctly uses the API,\n        and that there is no corruption caused by pixel routines.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             xvmc_id;\n\n    /** Pointer to the block array allocated by XvMCCreateBlocks().\n        The array has to be freed by XvMCDestroyBlocks().\n        Each group of 64 values represents one data block of differential\n        pixel information (in MoCo mode) or coefficients for IDCT.\n        - application - set the pointer during initialization\n        - libavcodec  - fills coefficients/pixel data into the array\n    */\n    short*          data_blocks;\n\n    /** Pointer to the macroblock description array allocated by\n        XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks().\n        - application - set the pointer during initialization\n        - libavcodec  - fills description data into the array\n    */\n    XvMCMacroBlock* mv_blocks;\n\n    /** Number of macroblock descriptions that can be stored in the mv_blocks\n        array.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             allocated_mv_blocks;\n\n    /** Number of blocks that can be stored at once in the data_blocks array.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             allocated_data_blocks;\n\n    /** Indicate that the hardware would interpret data_blocks as IDCT\n        coefficients and perform IDCT on them.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             idct;\n\n    /** In MoCo mode it indicates that intra macroblocks are assumed to be in\n        unsigned format; same as the XVMC_INTRA_UNSIGNED flag.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int        
     unsigned_intra;\n\n    /** Pointer to the surface allocated by XvMCCreateSurface().\n        It has to be freed by XvMCDestroySurface() on application exit.\n        It identifies the frame and its state on the video hardware.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    XvMCSurface*    p_surface;\n\n/** Set by the decoder before calling ff_draw_horiz_band(),\n    needed by the XvMCRenderSurface function. */\n//@{\n    /** Pointer to the surface used as past reference\n        - application - unchanged\n        - libavcodec  - set\n    */\n    XvMCSurface*    p_past_surface;\n\n    /** Pointer to the surface used as future reference\n        - application - unchanged\n        - libavcodec  - set\n    */\n    XvMCSurface*    p_future_surface;\n\n    /** top/bottom field or frame\n        - application - unchanged\n        - libavcodec  - set\n    */\n    unsigned int    picture_structure;\n\n    /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence\n        - application - unchanged\n        - libavcodec  - set\n    */\n    unsigned int    flags;\n//@}\n\n    /** Number of macroblock descriptions in the mv_blocks array\n        that have already been passed to the hardware.\n        - application - zeroes it on get_buffer().\n                        A successful ff_draw_horiz_band() may increment it\n                        with filled_mv_blocks_num or zero both.\n        - libavcodec  - unchanged\n    */\n    int             start_mv_blocks_num;\n\n    /** Number of new macroblock descriptions in the mv_blocks array (after\n        start_mv_blocks_num) that are filled by libavcodec and have to be\n        passed to the hardware.\n        - application - zeroes it on get_buffer() or after successful\n                        ff_draw_horiz_band().\n        - libavcodec  - incremented by one for each stored MB\n    */\n    int             filled_mv_blocks_num;\n\n    /** Number of the next free data block; one data block consists of\n        64 short values in the data_blocks array.\n        All blocks before this one have already been claimed by placing their\n        position into the corresponding block description structure fields,\n        which are part of the mv_blocks array.\n        - application - zeroes it on get_buffer().\n                        A successful ff_draw_horiz_band() may zero it together\n                        with start_mv_blocks_num.\n        - libavcodec  - each decoded macroblock increases it by the number\n                        of coded blocks it contains.\n    */\n    int             next_free_data_block_num;\n};\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_XVMC_H */\n"
  },
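  {
    "path": "docs/examples/xvmc_counters_sketch.c",
    "content": "/*\n * Editor-added illustrative sketch -- NOT part of the original tree.\n * XvMC is deprecated, so this only illustrates the field protocol\n * documented in libavcodec/xvmc.h: validate xvmc_id and zero the\n * per-frame counters the way the docs prescribe for the application\n * on get_buffer().\n */\n#include \"libavcodec/xvmc.h\"\n\nstatic int xvmc_reset_render_state(struct xvmc_pix_fmt *render)\n{\n    /* Sanity test described for the xvmc_id field. */\n    if (render->xvmc_id != AV_XVMC_ID)\n        return -1;\n\n    /* \"application - zeroes it on get_buffer()\" for all three counters. */\n    render->start_mv_blocks_num      = 0;\n    render->filled_mv_blocks_num     = 0;\n    render->next_free_data_block_num = 0;\n    return 0;\n}\n"
  },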
  {
    "path": "src/3rdparty/ffmpeg/include/libavdevice/avdevice.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVDEVICE_AVDEVICE_H\n#define AVDEVICE_AVDEVICE_H\n\n#include \"version.h\"\n\n/**\n * @file\n * @ingroup lavd\n * Main libavdevice API header\n */\n\n/**\n * @defgroup lavd Special devices muxing/demuxing library\n * @{\n * Libavdevice is a complementary library to @ref libavf \"libavformat\". It\n * provides various \"special\" platform-specific muxers and demuxers, e.g. for\n * grabbing devices, audio capture and playback etc. As a consequence, the\n * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own\n * I/O functions). The filename passed to avformat_open_input() often does not\n * refer to an actually existing file, but has some special device-specific\n * meaning - e.g. for x11grab it is the display name.\n *\n * To use libavdevice, simply call avdevice_register_all() to register all\n * compiled muxers and demuxers. They all use standard libavformat API.\n * @}\n */\n\n#include \"libavformat/avformat.h\"\n\n/**\n * Return the LIBAVDEVICE_VERSION_INT constant.\n */\nunsigned avdevice_version(void);\n\n/**\n * Return the libavdevice build-time configuration.\n */\nconst char *avdevice_configuration(void);\n\n/**\n * Return the libavdevice license.\n */\nconst char *avdevice_license(void);\n\n/**\n * Initialize libavdevice and register all the input and output devices.\n * @warning This function is not thread safe.\n */\nvoid avdevice_register_all(void);\n\ntypedef struct AVDeviceRect {\n    int x;      /**< x coordinate of top left corner */\n    int y;      /**< y coordinate of top left corner */\n    int width;  /**< width */\n    int height; /**< height */\n} AVDeviceRect;\n\n/**\n * Message types used by avdevice_app_to_dev_control_message().\n */\nenum AVAppToDevMessageType {\n    /**\n     * Dummy message.\n     */\n    AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),\n\n    /**\n     * Window size change message.\n     *\n     * Message is sent to the device every time the application changes the size\n     * of the window device renders to.\n     * Message should also be sent right after window is created.\n     *\n     * data: AVDeviceRect: new window size.\n     */\n    AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),\n\n    /**\n     * Repaint request message.\n     *\n     * Message is sent to the device when window have to be rapainted.\n     *\n     * data: AVDeviceRect: area required to be repainted.\n     *       NULL: whole area is required to be repainted.\n     */\n    AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A')\n};\n\n/**\n * Message types used by avdevice_dev_to_app_control_message().\n */\nenum AVDevToAppMessageType {\n    /**\n     * Dummy message.\n     */\n    AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),\n\n    /**\n     * 
Create window buffer message.\n     *\n     * Device requests to create a window buffer. Exact meaning is device-\n     * and application-dependent. Message is sent before rendering first\n     * frame and all one-shot initializations should be done here.\n     * Application is allowed to ignore preferred window buffer size.\n     *\n     * @note: Application is obligated to inform about window buffer size\n     *        with AV_APP_TO_DEV_WINDOW_SIZE message.\n     *\n     * data: AVDeviceRect: preferred size of the window buffer.\n     *       NULL: no preferred size of the window buffer.\n     */\n    AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),\n\n    /**\n     * Prepare window buffer message.\n     *\n     * Device requests to prepare a window buffer for rendering.\n     * Exact meaning is device- and application-dependent.\n     * Message is sent before rendering of each frame.\n     *\n     * data: NULL.\n     */\n    AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),\n\n    /**\n     * Display window buffer message.\n     *\n     * Device requests to display a window buffer.\n     * Message is sent when new frame is ready to be displyed.\n     * Usually buffers need to be swapped in handler of this message.\n     *\n     * data: NULL.\n     */\n    AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),\n\n    /**\n     * Destroy window buffer message.\n     *\n     * Device requests to destroy a window buffer.\n     * Message is sent when device is about to be destroyed and window\n     * buffer is not required anymore.\n     *\n     * data: NULL.\n     */\n    AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S')\n};\n\n/**\n * Send control message from application to device.\n *\n * @param s         device context.\n * @param type      message type.\n * @param data      message data. Exact type depends on message type.\n * @param data_size size of message data.\n * @return >= 0 on success, negative on error.\n *         AVERROR(ENOSYS) when device doesn't implement handler of the message.\n */\nint avdevice_app_to_dev_control_message(struct AVFormatContext *s,\n                                        enum AVAppToDevMessageType type,\n                                        void *data, size_t data_size);\n\n/**\n * Send control message from device to application.\n *\n * @param s         device context.\n * @param type      message type.\n * @param data      message data. 
Can be NULL.\n * @param data_size size of message data.\n * @return >= 0 on success, negative on error.\n *         AVERROR(ENOSYS) when application doesn't implement handler of the message.\n */\nint avdevice_dev_to_app_control_message(struct AVFormatContext *s,\n                                        enum AVDevToAppMessageType type,\n                                        void *data, size_t data_size);\n\n/**\n * Structure describes basic parameters of the device.\n */\ntypedef struct AVDeviceInfo {\n    char *device_name;                   /**< device name, format depends on device */\n    char *device_description;            /**< human friendly name */\n} AVDeviceInfo;\n\n/**\n * List of devices.\n */\ntypedef struct AVDeviceInfoList {\n    AVDeviceInfo **devices;              /**< list of autodetected devices */\n    int nb_devices;                      /**< number of autodetected devices */\n    int default_device;                  /**< index of default device or -1 if no default */\n} AVDeviceInfoList;\n\n/**\n * List devices.\n *\n * Returns available device names and their parameters.\n *\n * @note: Some devices may accept system-dependent device names that cannot be\n *        autodetected. The list returned by this function cannot be assumed to\n *        be always completed.\n *\n * @param s                device context.\n * @param[out] device_list list of autodetected devices.\n * @return count of autodetected devices, negative on error.\n */\nint avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);\n\n/**\n * Convinient function to free result of avdevice_list_devices().\n *\n * @param devices device list to be freed.\n */\nvoid avdevice_free_list_devices(AVDeviceInfoList **device_list);\n\n#endif /* AVDEVICE_AVDEVICE_H */\n"
  },
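  {
    "path": "docs/examples/avdevice_list_sketch.c",
    "content": "/*\n * Editor-added illustrative sketch -- NOT part of the original tree.\n * Exercises the device-list API declared in libavdevice/avdevice.h.\n * \"video4linux2\" is just an example input device name; any device\n * (de)muxer compiled into the bundled libavdevice would do, and\n * depending on the version the context may need to be opened before\n * listing works.\n */\n#include <stdio.h>\n#include \"libavdevice/avdevice.h\"\n\nint main(void)\n{\n    AVDeviceInfoList *list = NULL;\n    AVFormatContext *ctx;\n    int i, n;\n\n    avdevice_register_all();                 /* not thread safe, per the header */\n\n    ctx = avformat_alloc_context();\n    if (!ctx)\n        return 1;\n    ctx->iformat = av_find_input_format(\"video4linux2\");\n\n    n = avdevice_list_devices(ctx, &list);   /* device count, negative on error */\n    for (i = 0; i < n; i++)\n        printf(\"%s: %s%s\\n\",\n               list->devices[i]->device_name,\n               list->devices[i]->device_description,\n               i == list->default_device ? \" (default)\" : \"\");\n\n    avdevice_free_list_devices(&list);\n    avformat_free_context(ctx);\n    return 0;\n}\n"
  },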
  {
    "path": "src/3rdparty/ffmpeg/include/libavdevice/version.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVDEVICE_VERSION_H\n#define AVDEVICE_VERSION_H\n\n/**\n * @file\n * @ingroup lavd\n * Libavdevice version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVDEVICE_VERSION_MAJOR  55\n#define LIBAVDEVICE_VERSION_MINOR  10\n#define LIBAVDEVICE_VERSION_MICRO 100\n\n#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \\\n                                               LIBAVDEVICE_VERSION_MINOR, \\\n                                               LIBAVDEVICE_VERSION_MICRO)\n#define LIBAVDEVICE_VERSION     AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \\\n                                           LIBAVDEVICE_VERSION_MINOR, \\\n                                           LIBAVDEVICE_VERSION_MICRO)\n#define LIBAVDEVICE_BUILD       LIBAVDEVICE_VERSION_INT\n\n#define LIBAVDEVICE_IDENT       \"Lavd\" AV_STRINGIFY(LIBAVDEVICE_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#endif /* AVDEVICE_VERSION_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavfilter/asrc_abuffer.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_ASRC_ABUFFER_H\n#define AVFILTER_ASRC_ABUFFER_H\n\n#include \"avfilter.h\"\n\n/**\n * @file\n * memory buffer source for audio\n *\n * @deprecated use buffersrc.h instead.\n */\n\n/**\n * Queue an audio buffer to the audio buffer source.\n *\n * @param abuffersrc audio source buffer context\n * @param data pointers to the samples planes\n * @param linesize linesizes of each audio buffer plane\n * @param nb_samples number of samples per channel\n * @param sample_fmt sample format of the audio data\n * @param ch_layout channel layout of the audio data\n * @param planar flag to indicate if audio data is planar or packed\n * @param pts presentation timestamp of the audio buffer\n * @param flags unused\n *\n * @deprecated use av_buffersrc_add_ref() instead.\n */\nattribute_deprecated\nint av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,\n                               uint8_t *data[8], int linesize[8],\n                               int nb_samples, int sample_rate,\n                               int sample_fmt, int64_t ch_layout, int planar,\n                               int64_t pts, int av_unused flags);\n\n/**\n * Queue an audio buffer to the audio buffer source.\n *\n * This is similar to av_asrc_buffer_add_samples(), but the samples\n * are stored in a buffer with known size.\n *\n * @param abuffersrc audio source buffer context\n * @param buf pointer to the samples data, packed is assumed\n * @param size the size in bytes of the buffer, it must contain an\n * integer number of samples\n * @param sample_fmt sample format of the audio data\n * @param ch_layout channel layout of the audio data\n * @param pts presentation timestamp of the audio buffer\n * @param flags unused\n *\n * @deprecated use av_buffersrc_add_ref() instead.\n */\nattribute_deprecated\nint av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,\n                              uint8_t *buf, int buf_size,\n                              int sample_rate,\n                              int sample_fmt, int64_t ch_layout, int planar,\n                              int64_t pts, int av_unused flags);\n\n/**\n * Queue an audio buffer to the audio buffer source.\n *\n * @param abuffersrc audio source buffer context\n * @param samplesref buffer ref to queue\n * @param flags unused\n *\n * @deprecated use av_buffersrc_add_ref() instead.\n */\nattribute_deprecated\nint av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,\n                                        AVFilterBufferRef *samplesref,\n                                        int av_unused flags);\n\n#endif /* AVFILTER_ASRC_ABUFFER_H */\n"
  },
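  {
    "path": "docs/examples/abuffer_feed_sketch.c",
    "content": "/*\n * Editor-added illustrative sketch -- NOT part of the original tree.\n * Every function in asrc_abuffer.h is deprecated, so this shows the\n * replacement route the header points to: feeding samples to an\n * \"abuffer\" source through the buffersrc API. abuffersrc is assumed\n * to be an already-configured abuffer filter instance.\n */\n#include \"libavfilter/avfilter.h\"\n#include \"libavfilter/buffersrc.h\"\n#include \"libavutil/frame.h\"\n\nstatic int push_samples(AVFilterContext *abuffersrc, AVFrame *frame)\n{\n    /* On success av_buffersrc_add_frame() takes ownership of the\n     * frame's references and resets it; av_frame_ref() a copy first if\n     * the samples are still needed afterwards. */\n    return av_buffersrc_add_frame(abuffersrc, frame);\n}\n"
  },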
  {
    "path": "src/3rdparty/ffmpeg/include/libavfilter/avcodec.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_AVCODEC_H\n#define AVFILTER_AVCODEC_H\n\n/**\n * @file\n * libavcodec/libavfilter gluing utilities\n *\n * This should be included in an application ONLY if the installed\n * libavfilter has been compiled with libavcodec support, otherwise\n * symbols defined below will not be available.\n */\n\n#include \"avfilter.h\"\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Create and return a picref reference from the data and properties\n * contained in frame.\n *\n * @param perms permissions to assign to the new buffer reference\n * @deprecated avfilter APIs work natively with AVFrame instead.\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);\n\n\n/**\n * Create and return a picref reference from the data and properties\n * contained in frame.\n *\n * @param perms permissions to assign to the new buffer reference\n * @deprecated avfilter APIs work natively with AVFrame instead.\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,\n                                                            int perms);\n\n/**\n * Create and return a buffer reference from the data and properties\n * contained in frame.\n *\n * @param perms permissions to assign to the new buffer reference\n * @deprecated avfilter APIs work natively with AVFrame instead.\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,\n                                                      const AVFrame *frame,\n                                                      int perms);\n#endif\n\n#if FF_API_FILL_FRAME\n/**\n * Fill an AVFrame with the information stored in samplesref.\n *\n * @param frame an already allocated AVFrame\n * @param samplesref an audio buffer reference\n * @return >= 0 in case of success, a negative AVERROR code in case of\n * failure\n * @deprecated Use avfilter_copy_buf_props() instead.\n */\nattribute_deprecated\nint avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame,\n                                              const AVFilterBufferRef *samplesref);\n\n/**\n * Fill an AVFrame with the information stored in picref.\n *\n * @param frame an already allocated AVFrame\n * @param picref a video buffer reference\n * @return >= 0 in case of success, a negative AVERROR code in case of\n * failure\n * @deprecated Use avfilter_copy_buf_props() instead.\n */\nattribute_deprecated\nint avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame,\n                                              const AVFilterBufferRef *picref);\n\n/**\n * Fill an AVFrame with information stored in ref.\n *\n * @param frame an already allocated AVFrame\n * @param ref a video or 
audio buffer reference\n * @return >= 0 in case of success, a negative AVERROR code in case of\n * failure\n * @deprecated Use avfilter_copy_buf_props() instead.\n */\nattribute_deprecated\nint avfilter_fill_frame_from_buffer_ref(AVFrame *frame,\n                                        const AVFilterBufferRef *ref);\n#endif\n\n#endif /* AVFILTER_AVCODEC_H */\n"
  },
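  {
    "path": "docs/examples/avfilter_glue_sketch.c",
    "content": "/*\n * Editor-added illustrative sketch -- NOT part of the original tree.\n * The glue functions in libavfilter/avcodec.h only exist when\n * libavfilter was built with libavcodec support and are deprecated,\n * hence the FF_API guard: wrap an AVFrame as a read-only\n * AVFilterBufferRef for legacy filter code.\n */\n#include \"libavfilter/avcodec.h\"\n\n#if FF_API_AVFILTERBUFFER\nstatic AVFilterBufferRef *wrap_frame(const AVFrame *frame)\n{\n    /* AV_PERM_READ comes from avfilter.h; callers of legacy filter\n     * code would release this with avfilter_unref_bufferp(). */\n    return avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_READ);\n}\n#endif\n"
  },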
  {
    "path": "src/3rdparty/ffmpeg/include/libavfilter/avfilter.h",
    "content": "/*\n * filter layer\n * Copyright (c) 2007 Bobby Bingham\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_AVFILTER_H\n#define AVFILTER_AVFILTER_H\n\n/**\n * @file\n * @ingroup lavfi\n * Main libavfilter public API header\n */\n\n/**\n * @defgroup lavfi Libavfilter - graph-based frame editing library\n * @{\n */\n\n#include <stddef.h>\n\n#include \"libavutil/attributes.h\"\n#include \"libavutil/avutil.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/frame.h\"\n#include \"libavutil/log.h\"\n#include \"libavutil/samplefmt.h\"\n#include \"libavutil/pixfmt.h\"\n#include \"libavutil/rational.h\"\n\n#include \"libavfilter/version.h\"\n\n/**\n * Return the LIBAVFILTER_VERSION_INT constant.\n */\nunsigned avfilter_version(void);\n\n/**\n * Return the libavfilter build-time configuration.\n */\nconst char *avfilter_configuration(void);\n\n/**\n * Return the libavfilter license.\n */\nconst char *avfilter_license(void);\n\ntypedef struct AVFilterContext AVFilterContext;\ntypedef struct AVFilterLink    AVFilterLink;\ntypedef struct AVFilterPad     AVFilterPad;\ntypedef struct AVFilterFormats AVFilterFormats;\n\n#if FF_API_AVFILTERBUFFER\n/**\n * A reference-counted buffer data type used by the filter system. Filters\n * should not store pointers to this structure directly, but instead use the\n * AVFilterBufferRef structure below.\n */\ntypedef struct AVFilterBuffer {\n    uint8_t *data[8];           ///< buffer data for each plane/channel\n\n    /**\n     * pointers to the data planes/channels.\n     *\n     * For video, this should simply point to data[].\n     *\n     * For planar audio, each channel has a separate data pointer, and\n     * linesize[0] contains the size of each channel buffer.\n     * For packed audio, there is just one data pointer, and linesize[0]\n     * contains the total size of the buffer for all channels.\n     *\n     * Note: Both data and extended_data will always be set, but for planar\n     * audio with more channels that can fit in data, extended_data must be used\n     * in order to access all channels.\n     */\n    uint8_t **extended_data;\n    int linesize[8];            ///< number of bytes per line\n\n    /** private data to be used by a custom free function */\n    void *priv;\n    /**\n     * A pointer to the function to deallocate this buffer if the default\n     * function is not sufficient. 
This could, for example, add the memory\n     * back into a memory pool to be reused later without the overhead of\n     * reallocating it from scratch.\n     */\n    void (*free)(struct AVFilterBuffer *buf);\n\n    int format;                 ///< media format\n    int w, h;                   ///< width and height of the allocated buffer\n    unsigned refcount;          ///< number of references to this buffer\n} AVFilterBuffer;\n\n#define AV_PERM_READ     0x01   ///< can read from the buffer\n#define AV_PERM_WRITE    0x02   ///< can write to the buffer\n#define AV_PERM_PRESERVE 0x04   ///< nobody else can overwrite the buffer\n#define AV_PERM_REUSE    0x08   ///< can output the buffer multiple times, with the same contents each time\n#define AV_PERM_REUSE2   0x10   ///< can output the buffer multiple times, modified each time\n#define AV_PERM_NEG_LINESIZES 0x20  ///< the buffer requested can have negative linesizes\n#define AV_PERM_ALIGN    0x40   ///< the buffer must be aligned\n\n#define AVFILTER_ALIGN 16 //not part of ABI\n\n/**\n * Audio specific properties in a reference to an AVFilterBuffer. Since\n * AVFilterBufferRef is common to different media formats, audio specific\n * per reference properties must be separated out.\n */\ntypedef struct AVFilterBufferRefAudioProps {\n    uint64_t channel_layout;    ///< channel layout of audio buffer\n    int nb_samples;             ///< number of audio samples per channel\n    int sample_rate;            ///< audio buffer sample rate\n    int channels;               ///< number of channels (do not access directly)\n} AVFilterBufferRefAudioProps;\n\n/**\n * Video specific properties in a reference to an AVFilterBuffer. Since\n * AVFilterBufferRef is common to different media formats, video specific\n * per reference properties must be separated out.\n */\ntypedef struct AVFilterBufferRefVideoProps {\n    int w;                      ///< image width\n    int h;                      ///< image height\n    AVRational sample_aspect_ratio; ///< sample aspect ratio\n    int interlaced;             ///< is frame interlaced\n    int top_field_first;        ///< field order\n    enum AVPictureType pict_type; ///< picture type of the frame\n    int key_frame;              ///< 1 -> keyframe, 0-> not\n    int qp_table_linesize;                ///< qp_table stride\n    int qp_table_size;            ///< qp_table size\n    int8_t *qp_table;             ///< array of Quantization Parameters\n} AVFilterBufferRefVideoProps;\n\n/**\n * A reference to an AVFilterBuffer. Since filters can manipulate the origin of\n * a buffer to, for example, crop image without any memcpy, the buffer origin\n * and dimensions are per-reference properties. 
Linesize is also useful for\n * image flipping, frame to field filters, etc, and so is also per-reference.\n *\n * TODO: add anything necessary for frame reordering\n */\ntypedef struct AVFilterBufferRef {\n    AVFilterBuffer *buf;        ///< the buffer that this is a reference to\n    uint8_t *data[8];           ///< picture/audio data for each plane\n    /**\n     * pointers to the data planes/channels.\n     *\n     * For video, this should simply point to data[].\n     *\n     * For planar audio, each channel has a separate data pointer, and\n     * linesize[0] contains the size of each channel buffer.\n     * For packed audio, there is just one data pointer, and linesize[0]\n     * contains the total size of the buffer for all channels.\n     *\n     * Note: Both data and extended_data will always be set, but for planar\n     * audio with more channels that can fit in data, extended_data must be used\n     * in order to access all channels.\n     */\n    uint8_t **extended_data;\n    int linesize[8];            ///< number of bytes per line\n\n    AVFilterBufferRefVideoProps *video; ///< video buffer specific properties\n    AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties\n\n    /**\n     * presentation timestamp. The time unit may change during\n     * filtering, as it is specified in the link and the filter code\n     * may need to rescale the PTS accordingly.\n     */\n    int64_t pts;\n    int64_t pos;                ///< byte position in stream, -1 if unknown\n\n    int format;                 ///< media format\n\n    int perms;                  ///< permissions, see the AV_PERM_* flags\n\n    enum AVMediaType type;      ///< media type of buffer data\n\n    AVDictionary *metadata;     ///< dictionary containing metadata key=value tags\n} AVFilterBufferRef;\n\n/**\n * Copy properties of src to dst, without copying the actual data\n */\nattribute_deprecated\nvoid avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src);\n\n/**\n * Add a new reference to a buffer.\n *\n * @param ref   an existing reference to the buffer\n * @param pmask a bitmask containing the allowable permissions in the new\n *              reference\n * @return      a new reference to the buffer with the same properties as the\n *              old, excluding any permissions denied by pmask\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);\n\n/**\n * Remove a reference to a buffer. 
If this is the last reference to the\n * buffer, the buffer itself is also automatically freed.\n *\n * @param ref reference to the buffer, may be NULL\n *\n * @note it is recommended to use avfilter_unref_bufferp() instead of this\n * function\n */\nattribute_deprecated\nvoid avfilter_unref_buffer(AVFilterBufferRef *ref);\n\n/**\n * Remove a reference to a buffer and set the pointer to NULL.\n * If this is the last reference to the buffer, the buffer itself\n * is also automatically freed.\n *\n * @param ref pointer to the buffer reference\n */\nattribute_deprecated\nvoid avfilter_unref_bufferp(AVFilterBufferRef **ref);\n#endif\n\n/**\n * Get the number of channels of a buffer reference.\n */\nattribute_deprecated\nint avfilter_ref_get_channels(AVFilterBufferRef *ref);\n\n#if FF_API_AVFILTERPAD_PUBLIC\n/**\n * A filter pad used for either input or output.\n *\n * See doc/filter_design.txt for details on how to implement the methods.\n *\n * @warning this struct might be removed from public API.\n * users should call avfilter_pad_get_name() and avfilter_pad_get_type()\n * to access the name and type fields; there should be no need to access\n * any other fields from outside of libavfilter.\n */\nstruct AVFilterPad {\n    /**\n     * Pad name. The name is unique among inputs and among outputs, but an\n     * input may have the same name as an output. This may be NULL if this\n     * pad has no need to ever be referenced by name.\n     */\n    const char *name;\n\n    /**\n     * AVFilterPad type.\n     */\n    enum AVMediaType type;\n\n    /**\n     * Input pads:\n     * Minimum required permissions on incoming buffers. Any buffer with\n     * insufficient permissions will be automatically copied by the filter\n     * system to a new buffer which provides the needed access permissions.\n     *\n     * Output pads:\n     * Guaranteed permissions on outgoing buffers. Any buffer pushed on the\n     * link must have at least these permissions; this fact is checked by\n     * asserts. It can be used to optimize buffer allocation.\n     */\n    attribute_deprecated int min_perms;\n\n    /**\n     * Input pads:\n     * Permissions which are not accepted on incoming buffers. Any buffer\n     * which has any of these permissions set will be automatically copied\n     * by the filter system to a new buffer which does not have those\n     * permissions. This can be used to easily disallow buffers with\n     * AV_PERM_REUSE.\n     *\n     * Output pads:\n     * Permissions which are automatically removed on outgoing buffers. It\n     * can be used to optimize buffer allocation.\n     */\n    attribute_deprecated int rej_perms;\n\n    /**\n     * @deprecated unused\n     */\n    int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref);\n\n    /**\n     * Callback function to get a video buffer. If NULL, the filter system will\n     * use ff_default_get_video_buffer().\n     *\n     * Input video pads only.\n     */\n    AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);\n\n    /**\n     * Callback function to get an audio buffer. If NULL, the filter system will\n     * use ff_default_get_audio_buffer().\n     *\n     * Input audio pads only.\n     */\n    AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);\n\n    /**\n     * @deprecated unused\n     */\n    int (*end_frame)(AVFilterLink *link);\n\n    /**\n     * @deprecated unused\n     */\n    int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir);\n\n    /**\n     * Filtering callback. 
This is where a filter receives a frame with\n     * audio/video data and should do its processing.\n     *\n     * Input pads only.\n     *\n     * @return >= 0 on success, a negative AVERROR on error. This function\n     * must ensure that frame is properly unreferenced on error if it\n     * hasn't been passed on to another filter.\n     */\n    int (*filter_frame)(AVFilterLink *link, AVFrame *frame);\n\n    /**\n     * Frame poll callback. This returns the number of immediately available\n     * samples. It should return a positive value if the next request_frame()\n     * is guaranteed to return one frame (with no delay).\n     *\n     * Defaults to just calling the source poll_frame() method.\n     *\n     * Output pads only.\n     */\n    int (*poll_frame)(AVFilterLink *link);\n\n    /**\n     * Frame request callback. A call to this should result in at least one\n     * frame being output over the given link. This should return zero on\n     * success, and another value on error.\n     * See ff_request_frame() for the error codes with a specific\n     * meaning.\n     *\n     * Output pads only.\n     */\n    int (*request_frame)(AVFilterLink *link);\n\n    /**\n     * Link configuration callback.\n     *\n     * For output pads, this should set the following link properties:\n     * video: width, height, sample_aspect_ratio, time_base\n     * audio: sample_rate.\n     *\n     * This should NOT set properties such as format, channel_layout, etc which\n     * are negotiated between filters by the filter system using the\n     * query_formats() callback before this function is called.\n     *\n     * For input pads, this should check the properties of the link, and update\n     * the filter's internal state as necessary.\n     *\n     * For both input and output pads, this should return zero on success,\n     * and another value on error.\n     */\n    int (*config_props)(AVFilterLink *link);\n\n    /**\n     * The filter expects a fifo to be inserted on its input link,\n     * typically because it has a delay.\n     *\n     * Input pads only.\n     */\n    int needs_fifo;\n\n    /**\n     * The filter expects writable frames from its input link,\n     * duplicating data buffers if needed.\n     *\n     * Input pads only.\n     */\n    int needs_writable;\n};\n#endif\n\n/**\n * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.\n * AVFilter.inputs/outputs).\n */\nint avfilter_pad_count(const AVFilterPad *pads);\n\n/**\n * Get the name of an AVFilterPad.\n *\n * @param pads an array of AVFilterPads\n * @param pad_idx index of the pad in the array; it is the caller's\n *                responsibility to ensure the index is valid\n *\n * @return name of the pad_idx'th pad in pads\n */\nconst char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx);\n\n/**\n * Get the type of an AVFilterPad.\n *\n * @param pads an array of AVFilterPads\n * @param pad_idx index of the pad in the array; it is the caller's\n *                responsibility to ensure the index is valid\n *\n * @return type of the pad_idx'th pad in pads\n */\nenum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);\n\n/**\n * The number of the filter inputs is not determined just by AVFilter.inputs.\n * The filter might add additional inputs during initialization depending on the\n * options supplied to it.\n */\n#define AVFILTER_FLAG_DYNAMIC_INPUTS        (1 << 0)\n/**\n * The number of the filter outputs is not determined just by AVFilter.outputs.\n * The filter might 
add additional outputs during initialization depending on\n * the options supplied to it.\n */\n#define AVFILTER_FLAG_DYNAMIC_OUTPUTS       (1 << 1)\n/**\n * The filter supports multithreading by splitting frames into multiple parts\n * and processing them concurrently.\n */\n#define AVFILTER_FLAG_SLICE_THREADS         (1 << 2)\n/**\n * Some filters support a generic \"enable\" expression option that can be used\n * to enable or disable a filter in the timeline. Filters supporting this\n * option have this flag set. When the enable expression is false, the default\n * no-op filter_frame() function is called in place of the filter_frame()\n * callback defined on each input pad, thus the frame is passed unchanged to\n * the next filters.\n */\n#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC  (1 << 16)\n/**\n * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will\n * have its filter_frame() callback(s) called as usual even when the enable\n * expression is false. The filter will disable filtering within the\n * filter_frame() callback(s) itself, for example executing code depending on\n * the AVFilterContext->is_disabled value.\n */\n#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)\n/**\n * Handy mask to test whether or not the filter supports the timeline feature\n * (internally or generically).\n */\n#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)\n\n/**\n * Filter definition. This defines the pads a filter contains, and all the\n * callback functions used to interact with the filter.\n */\ntypedef struct AVFilter {\n    /**\n     * Filter name. Must be non-NULL and unique among filters.\n     */\n    const char *name;\n\n    /**\n     * A description of the filter. May be NULL.\n     *\n     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.\n     */\n    const char *description;\n\n    /**\n     * List of inputs, terminated by a zeroed element.\n     *\n     * NULL if there are no (static) inputs. Instances of filters with\n     * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in\n     * this list.\n     */\n    const AVFilterPad *inputs;\n    /**\n     * List of outputs, terminated by a zeroed element.\n     *\n     * NULL if there are no (static) outputs. Instances of filters with\n     * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in\n     * this list.\n     */\n    const AVFilterPad *outputs;\n\n    /**\n     * A class for the private data, used to declare filter private AVOptions.\n     * This field is NULL for filters that do not declare any options.\n     *\n     * If this field is non-NULL, the first member of the filter private data\n     * must be a pointer to AVClass, which will be set by libavfilter generic\n     * code to this class.\n     */\n    const AVClass *priv_class;\n\n    /**\n     * A combination of AVFILTER_FLAG_*\n     */\n    int flags;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. 
They\n     * may not be used outside of libavfilter and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n\n    /**\n     * Filter initialization function.\n     *\n     * This callback will be called only once during the filter lifetime, after\n     * all the options have been set, but before links between filters are\n     * established and format negotiation is done.\n     *\n     * Basic filter initialization should be done here. Filters with dynamic\n     * inputs and/or outputs should create those inputs/outputs here based on\n     * provided options. No more changes to this filter's inputs/outputs can be\n     * done after this callback.\n     *\n     * This callback must not assume that the filter links exist or frame\n     * parameters are known.\n     *\n     * @ref AVFilter.uninit \"uninit\" is guaranteed to be called even if\n     * initialization fails, so this callback does not have to clean up on\n     * failure.\n     *\n     * @return 0 on success, a negative AVERROR on failure\n     */\n    int (*init)(AVFilterContext *ctx);\n\n    /**\n     * Should be set instead of @ref AVFilter.init \"init\" by the filters that\n     * want to pass a dictionary of AVOptions to nested contexts that are\n     * allocated during init.\n     *\n     * On return, the options dict should be freed and replaced with one that\n     * contains all the options which could not be processed by this filter (or\n     * with NULL if all the options were processed).\n     *\n     * Otherwise the semantics is the same as for @ref AVFilter.init \"init\".\n     */\n    int (*init_dict)(AVFilterContext *ctx, AVDictionary **options);\n\n    /**\n     * Filter uninitialization function.\n     *\n     * Called only once right before the filter is freed. Should deallocate any\n     * memory held by the filter, release any buffer references, etc. It does\n     * not need to deallocate the AVFilterContext.priv memory itself.\n     *\n     * This callback may be called even if @ref AVFilter.init \"init\" was not\n     * called or failed, so it must be prepared to handle such a situation.\n     */\n    void (*uninit)(AVFilterContext *ctx);\n\n    /**\n     * Query formats supported by the filter on its inputs and outputs.\n     *\n     * This callback is called after the filter is initialized (so the inputs\n     * and outputs are fixed), shortly before the format negotiation. This\n     * callback may be called more than once.\n     *\n     * This callback must set AVFilterLink.out_formats on every input link and\n     * AVFilterLink.in_formats on every output link to a list of pixel/sample\n     * formats that the filter supports on that link. 
For audio links, this\n     * filter must also set @ref AVFilterLink.in_samplerates \"in_samplerates\" /\n     * @ref AVFilterLink.out_samplerates \"out_samplerates\" and\n     * @ref AVFilterLink.in_channel_layouts \"in_channel_layouts\" /\n     * @ref AVFilterLink.out_channel_layouts \"out_channel_layouts\" analogously.\n     *\n     * This callback may be NULL for filters with one input, in which case\n     * libavfilter assumes that it supports all input formats and preserves\n     * them on output.\n     *\n     * @return zero on success, a negative value corresponding to an\n     * AVERROR code otherwise\n     */\n    int (*query_formats)(AVFilterContext *);\n\n    int priv_size;      ///< size of private data to allocate for the filter\n\n    /**\n     * Used by the filter registration system. Must not be touched by any other\n     * code.\n     */\n    struct AVFilter *next;\n\n    /**\n     * Make the filter instance process a command.\n     *\n     * @param cmd    the command to process, for handling simplicity all commands must be alphanumeric only\n     * @param arg    the argument for the command\n     * @param res    a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported.\n     * @param flags  if AVFILTER_CMD_FLAG_FAST is set and the command would be\n     *               time consuming then a filter should treat it like an unsupported command\n     *\n     * @returns >=0 on success otherwise an error code.\n     *          AVERROR(ENOSYS) on unsupported commands\n     */\n    int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);\n\n    /**\n     * Filter initialization function, alternative to the init()\n     * callback. Args contains the user-supplied parameters, opaque is\n     * used for providing binary data.\n     */\n    int (*init_opaque)(AVFilterContext *ctx, void *opaque);\n} AVFilter;\n\n/**\n * Process multiple parts of the frame concurrently.\n */\n#define AVFILTER_THREAD_SLICE (1 << 0)\n\ntypedef struct AVFilterInternal AVFilterInternal;\n\n/** An instance of a filter */\nstruct AVFilterContext {\n    const AVClass *av_class;        ///< needed for av_log() and filters common options\n\n    const AVFilter *filter;         ///< the AVFilter of which this is an instance\n\n    char *name;                     ///< name of this filter instance\n\n    AVFilterPad   *input_pads;      ///< array of input pads\n    AVFilterLink **inputs;          ///< array of pointers to input links\n#if FF_API_FOO_COUNT\n    attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs\n#endif\n    unsigned    nb_inputs;          ///< number of input pads\n\n    AVFilterPad   *output_pads;     ///< array of output pads\n    AVFilterLink **outputs;         ///< array of pointers to output links\n#if FF_API_FOO_COUNT\n    attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs\n#endif\n    unsigned    nb_outputs;         ///< number of output pads\n\n    void *priv;                     ///< private data for use by the filter\n\n    struct AVFilterGraph *graph;    ///< filtergraph this filter belongs to\n\n    /**\n     * Type of multithreading being allowed/used. A combination of\n     * AVFILTER_THREAD_* flags.\n     *\n     * May be set by the caller before initializing the filter to forbid some\n     * or all kinds of multithreading for this filter. 
The default is allowing\n     * everything.\n     *\n     * When the filter is initialized, this field is combined using bit AND with\n     * AVFilterGraph.thread_type to get the final mask used for determining\n     * allowed threading types. I.e. a threading type needs to be set in both\n     * to be allowed.\n     *\n     * After the filter is initialized, libavfilter sets this field to the\n     * threading type that is actually used (0 for no multithreading).\n     */\n    int thread_type;\n\n    /**\n     * An opaque struct for libavfilter internal use.\n     */\n    AVFilterInternal *internal;\n\n    struct AVFilterCommand *command_queue;\n\n    char *enable_str;               ///< enable expression string\n    void *enable;                   ///< parsed expression (AVExpr*)\n    double *var_values;             ///< variable values for the enable expression\n    int is_disabled;                ///< the enabled state from the last expression evaluation\n};\n\n/**\n * A link between two filters. This contains pointers to the source and\n * destination filters between which this link exists, and the indexes of\n * the pads involved. In addition, this link also contains the parameters\n * which have been negotiated and agreed upon between the filters, such as\n * image dimensions, format, etc.\n */\nstruct AVFilterLink {\n    AVFilterContext *src;       ///< source filter\n    AVFilterPad *srcpad;        ///< output pad on the source filter\n\n    AVFilterContext *dst;       ///< dest filter\n    AVFilterPad *dstpad;        ///< input pad on the dest filter\n\n    enum AVMediaType type;      ///< filter media type\n\n    /* These parameters apply only to video */\n    int w;                      ///< agreed upon image width\n    int h;                      ///< agreed upon image height\n    AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio\n    /* These parameters apply only to audio */\n    uint64_t channel_layout;    ///< channel layout of current buffer (see libavutil/channel_layout.h)\n    int sample_rate;            ///< samples per second\n\n    int format;                 ///< agreed upon media format\n\n    /**\n     * Define the time base used by the PTS of the frames/samples\n     * which will pass through this link.\n     * During the configuration stage, each filter is supposed to\n     * change only the output timebase, while the timebase of the\n     * input link is assumed to be an unchangeable property.\n     */\n    AVRational time_base;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. They\n     * may not be used outside of libavfilter and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    /**\n     * Lists of formats and channel layouts supported by the input and output\n     * filters respectively.
 These lists are used for negotiating the format\n     * to actually be used, which will be loaded into the format and\n     * channel_layout members, above, when chosen.\n     */\n    AVFilterFormats *in_formats;\n    AVFilterFormats *out_formats;\n\n    /**\n     * Lists of channel layouts and sample rates used for automatic\n     * negotiation.\n     */\n    AVFilterFormats  *in_samplerates;\n    AVFilterFormats *out_samplerates;\n    struct AVFilterChannelLayouts  *in_channel_layouts;\n    struct AVFilterChannelLayouts *out_channel_layouts;\n\n    /**\n     * Audio only, the destination filter sets this to a non-zero value to\n     * request that buffers with the given number of samples should be sent to\n     * it. AVFilterPad.needs_fifo must also be set on the corresponding input\n     * pad.\n     * Last buffer before EOF will be padded with silence.\n     */\n    int request_samples;\n\n    /** stage of the initialization of the link properties (dimensions, etc) */\n    enum {\n        AVLINK_UNINIT = 0,      ///< not started\n        AVLINK_STARTINIT,       ///< started, but incomplete\n        AVLINK_INIT             ///< complete\n    } init_state;\n\n    struct AVFilterPool *pool;\n\n    /**\n     * Graph the filter belongs to.\n     */\n    struct AVFilterGraph *graph;\n\n    /**\n     * Current timestamp of the link, as defined by the most recent\n     * frame(s), in AV_TIME_BASE units.\n     */\n    int64_t current_pts;\n\n    /**\n     * Index in the age array.\n     */\n    int age_index;\n\n    /**\n     * Frame rate of the stream on the link, or 1/0 if unknown;\n     * if left to 0/0, will automatically be copied from the first input\n     * of the source filter if it exists.\n     *\n     * Sources should set it to the best estimation of the real frame rate.\n     * Filters should update it if necessary depending on their function.\n     * Sinks can use it to set a default output frame rate.\n     * It is similar to the r_frame_rate field in AVStream.\n     */\n    AVRational frame_rate;\n\n    /**\n     * Buffer partially filled with samples to achieve a fixed/minimum size.\n     */\n    AVFrame *partial_buf;\n\n    /**\n     * Size of the partial buffer to allocate.\n     * Must be between min_samples and max_samples.\n     */\n    int partial_buf_size;\n\n    /**\n     * Minimum number of samples to filter at once. If filter_frame() is\n     * called with fewer samples, it will accumulate them in partial_buf.\n     * This field and the related ones must not be changed after filtering\n     * has started.\n     * If 0, all related fields are ignored.\n     */\n    int min_samples;\n\n    /**\n     * Maximum number of samples to filter at once. If filter_frame() is\n     * called with more samples, it will split them.\n     */\n    int max_samples;\n\n    /**\n     * The buffer reference currently being received across the link by the\n     * destination filter. This is used internally by the filter system to\n     * allow automatic copying of buffers which do not have sufficient\n     * permissions for the destination.
 This should not be accessed directly\n     * by the filters.\n     */\n    AVFilterBufferRef *cur_buf_copy;\n\n    /**\n     * True if the link is closed.\n     * If set, all attempts at start_frame, filter_frame or request_frame\n     * will fail with AVERROR_EOF, and if necessary the reference will be\n     * destroyed.\n     * If request_frame returns AVERROR_EOF, this flag is set on the\n     * corresponding link.\n     * It can also be set by either the source or the destination\n     * filter.\n     */\n    int closed;\n\n    /**\n     * Number of channels.\n     */\n    int channels;\n\n    /**\n     * True if a frame is being requested on the link.\n     * Used internally by the framework.\n     */\n    unsigned frame_requested;\n\n    /**\n     * Link processing flags.\n     */\n    unsigned flags;\n\n    /**\n     * Number of past frames sent through the link.\n     */\n    int64_t frame_count;\n};\n\n/**\n * Link two filters together.\n *\n * @param src    the source filter\n * @param srcpad index of the output pad on the source filter\n * @param dst    the destination filter\n * @param dstpad index of the input pad on the destination filter\n * @return       zero on success\n
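 *\n * For instance, a hedged sketch connecting two already created filter\n * instances (buffersrc_ctx and buffersink_ctx are assumed to exist):\n * @code\n * if (avfilter_link(buffersrc_ctx, 0, buffersink_ctx, 0) < 0)\n *     abort();\n * @endcode\n */\nint avfilter_link(AVFilterContext *src, unsigned srcpad,\n                  AVFilterContext *dst, unsigned dstpad);\n\n/**\n * Free the link in *link, and set its pointer to NULL.\n */\nvoid avfilter_link_free(AVFilterLink **link);\n\n/**\n * Get the number of channels of a link.\n */\nint avfilter_link_get_channels(AVFilterLink *link);\n\n/**\n * Set the closed field of a link.\n */\nvoid avfilter_link_set_closed(AVFilterLink *link, int closed);\n\n/**\n * Negotiate the media format, dimensions, etc of all inputs to a filter.\n *\n * @param filter the filter to negotiate the properties for its inputs\n * @return       zero on successful negotiation\n */\nint avfilter_config_links(AVFilterContext *filter);\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Create a buffer reference wrapped around an already allocated image\n * buffer.\n *\n * @param data pointers to the planes of the image to reference\n * @param linesize linesizes for the planes of the image to reference\n * @param perms the required access permissions\n * @param w the width of the image specified by the data and linesize arrays\n * @param h the height of the image specified by the data and linesize arrays\n * @param format the pixel format of the image specified by the data and linesize arrays\n */\nattribute_deprecated\nAVFilterBufferRef *\navfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,\n                                          int w, int h, enum AVPixelFormat format);\n\n/**\n * Create an audio buffer reference wrapped around an already\n * allocated samples buffer.\n *\n * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version\n * that can handle unknown channel layouts.\n *\n * @param data           pointers to the samples plane buffers\n * @param linesize       linesize for the samples plane buffers\n * @param perms          the required access permissions\n * @param nb_samples     number of samples per channel\n * @param sample_fmt     the format of each sample in the buffer to allocate\n * @param channel_layout the channel layout of the buffer\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,\n                                                             int linesize,\n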
                                                             int perms,\n                                                             int nb_samples,\n                                                             enum AVSampleFormat sample_fmt,\n                                                             uint64_t channel_layout);\n/**\n * Create an audio buffer reference wrapped around an already\n * allocated samples buffer.\n *\n * @param data           pointers to the samples plane buffers\n * @param linesize       linesize for the samples plane buffers\n * @param perms          the required access permissions\n * @param nb_samples     number of samples per channel\n * @param sample_fmt     the format of each sample in the buffer to allocate\n * @param channels       the number of channels of the buffer\n * @param channel_layout the channel layout of the buffer,\n *                       must be either 0 or consistent with channels\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,\n                                                                      int linesize,\n                                                                      int perms,\n                                                                      int nb_samples,\n                                                                      enum AVSampleFormat sample_fmt,\n                                                                      int channels,\n                                                                      uint64_t channel_layout);\n\n#endif\n\n\n#define AVFILTER_CMD_FLAG_ONE   1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically\n#define AVFILTER_CMD_FLAG_FAST  2 ///< Only execute command when it's fast (like a video out that supports contrast adjustment in hw)\n\n/**\n * Make the filter instance process a command.\n * It is recommended to use avfilter_graph_send_command().\n */\nint avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);\n\n/** Initialize the filter system. Register all builtin filters. */\nvoid avfilter_register_all(void);\n\n#if FF_API_OLD_FILTER_REGISTER\n/** Uninitialize the filter system. Unregister all filters. */\nattribute_deprecated\nvoid avfilter_uninit(void);\n#endif\n\n/**\n * Register a filter. This is only needed if you plan to use\n * avfilter_get_by_name later to look up the AVFilter structure by name. A\n * filter can still be instantiated with avfilter_graph_alloc_filter even if it\n * is not registered.\n *\n * @param filter the filter to register\n * @return 0 if the registration was successful, a negative value\n * otherwise\n */\nint avfilter_register(AVFilter *filter);\n\n/**\n * Get a filter definition matching the given name.\n *\n * @param name the filter name to find\n * @return     the filter definition, if any matching one is registered.\n *             NULL if none found.\n
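 *\n * For example, a hedged lookup sketch (assumes avfilter_register_all() has\n * been called and that a filter named \"scale\" was compiled in):\n * @code\n * const AVFilter *scale = avfilter_get_by_name(\"scale\");\n * if (!scale)\n *     abort();\n * @endcode\n */\n#if !FF_API_NOCONST_GET_NAME\nconst\n#endif\nAVFilter *avfilter_get_by_name(const char *name);\n\n/**\n * Iterate over all registered filters.\n * @return If prev is non-NULL, next registered filter after prev or NULL if\n * prev is the last filter.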
 If prev is NULL, return the first registered filter.\n */\nconst AVFilter *avfilter_next(const AVFilter *prev);\n\n#if FF_API_OLD_FILTER_REGISTER\n/**\n * If filter is NULL, returns a pointer to the first registered filter pointer,\n * if filter is non-NULL, returns the next pointer after filter.\n * If the returned pointer points to NULL, the last registered filter\n * was already reached.\n * @deprecated use avfilter_next()\n */\nattribute_deprecated\nAVFilter **av_filter_next(AVFilter **filter);\n#endif\n\n#if FF_API_AVFILTER_OPEN\n/**\n * Create a filter instance.\n *\n * @param filter_ctx put here a pointer to the created filter context\n * on success, NULL on failure\n * @param filter    the filter to create an instance of\n * @param inst_name Name to give to the new instance. Can be NULL for none.\n * @return >= 0 in case of success, a negative error code otherwise\n * @deprecated use avfilter_graph_alloc_filter() instead\n */\nattribute_deprecated\nint avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);\n#endif\n\n\n#if FF_API_AVFILTER_INIT_FILTER\n/**\n * Initialize a filter.\n *\n * @param filter the filter to initialize\n * @param args   A string of parameters to use when initializing the filter.\n *               The format and meaning of this string varies by filter.\n * @param opaque Any extra non-string data needed by the filter. The meaning\n *               of this parameter varies by filter.\n * @return       zero on success\n */\nattribute_deprecated\nint avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque);\n#endif\n\n/**\n * Initialize a filter with the supplied parameters.\n *\n * @param ctx  uninitialized filter context to initialize\n * @param args Options to initialize the filter with. This must be a\n *             ':'-separated list of options in the 'key=value' form.\n *             May be NULL if the options have been set directly using the\n *             AVOptions API or there are no options that need to be set.\n * @return 0 on success, a negative AVERROR on failure\n */\nint avfilter_init_str(AVFilterContext *ctx, const char *args);\n\n/**\n * Initialize a filter with the supplied dictionary of options.\n *\n * @param ctx     uninitialized filter context to initialize\n * @param options An AVDictionary filled with options for this filter. On\n *                return this parameter will be destroyed and replaced with\n *                a dict containing options that were not found. This dictionary\n *                must be freed by the caller.\n *                May be NULL, then this function is equivalent to\n *                avfilter_init_str() with the second parameter set to NULL.\n * @return 0 on success, a negative AVERROR on failure\n *\n * @note This function and avfilter_init_str() do essentially the same thing,\n * the difference is in the manner in which the options are passed. It is up to the\n * calling code to choose whichever is more preferable. The two functions also\n * behave differently when some of the provided options are not declared as\n * supported by the filter. In such a case, avfilter_init_str() will fail, but\n * this function will leave those extra options in the options AVDictionary and\n * continue as usual.\n
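 *\n * A minimal hedged sketch (ctx is assumed to be a freshly allocated instance\n * of a filter that understands a \"volume\" option):\n * @code\n * AVDictionary *opts = NULL;\n * av_dict_set(&opts, \"volume\", \"0.5\", 0);\n * if (avfilter_init_dict(ctx, &opts) < 0)\n *     abort();\n * av_dict_free(&opts); // frees the returned dict of unrecognized options\n * @endcode\n */\nint avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options);\n\n/**\n * Free a filter context.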
 This will also remove the filter from its\n * filtergraph's list of filters.\n *\n * @param filter the filter to free\n */\nvoid avfilter_free(AVFilterContext *filter);\n\n/**\n * Insert a filter in the middle of an existing link.\n *\n * @param link the link into which the filter should be inserted\n * @param filt the filter to be inserted\n * @param filt_srcpad_idx the input pad on the filter to connect\n * @param filt_dstpad_idx the output pad on the filter to connect\n * @return     zero on success\n */\nint avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,\n                           unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Copy the frame properties of src to dst, without copying the actual\n * image data.\n *\n * @return 0 on success, a negative number on error.\n */\nattribute_deprecated\nint avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);\n\n/**\n * Copy the frame properties and data pointers of src to dst, without copying\n * the actual data.\n *\n * @return 0 on success, a negative number on error.\n */\nattribute_deprecated\nint avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);\n#endif\n\n/**\n * @return AVClass for AVFilterContext.\n *\n * @see av_opt_find().\n */\nconst AVClass *avfilter_get_class(void);\n\ntypedef struct AVFilterGraphInternal AVFilterGraphInternal;\n\n/**\n * A function pointer passed to the @ref AVFilterGraph.execute callback to be\n * executed multiple times, possibly in parallel.\n *\n * @param ctx the filter context the job belongs to\n * @param arg an opaque parameter passed through from @ref\n *            AVFilterGraph.execute\n * @param jobnr the index of the job being executed\n * @param nb_jobs the total number of jobs\n *\n * @return 0 on success, a negative AVERROR on error\n */\ntypedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);\n\n/**\n * A function executing multiple jobs, possibly in parallel.\n *\n * @param ctx the filter context to which the jobs belong\n * @param func the function to be called multiple times\n * @param arg the argument to be passed to func\n * @param ret a nb_jobs-sized array to be filled with return values from each\n *            invocation of func\n * @param nb_jobs the number of jobs to execute\n *\n * @return 0 on success, a negative AVERROR on error\n */\ntypedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func,\n                                    void *arg, int *ret, int nb_jobs);\n\ntypedef struct AVFilterGraph {\n    const AVClass *av_class;\n#if FF_API_FOO_COUNT\n    attribute_deprecated\n    unsigned filter_count_unused;\n#endif\n    AVFilterContext **filters;\n#if !FF_API_FOO_COUNT\n    unsigned nb_filters;\n#endif\n\n    char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters\n    char *resample_lavr_opts;   ///< libavresample options to use for the auto-inserted resample filters\n#if FF_API_FOO_COUNT\n    unsigned nb_filters;\n#endif\n\n    /**\n     * Type of multithreading allowed for filters in this graph. A combination\n     * of AVFILTER_THREAD_* flags.\n     *\n     * May be set by the caller at any point; the setting will apply to all\n     * filters initialized after that.
 The default is allowing everything.\n     *\n     * When a filter in this graph is initialized, this field is combined using\n     * bit AND with AVFilterContext.thread_type to get the final mask used for\n     * determining allowed threading types. I.e. a threading type needs to be\n     * set in both to be allowed.\n     */\n    int thread_type;\n\n    /**\n     * Maximum number of threads used by filters in this graph. May be set by\n     * the caller before adding any filters to the filtergraph. Zero (the\n     * default) means that the number of threads is determined automatically.\n     */\n    int nb_threads;\n\n    /**\n     * Opaque object for libavfilter internal use.\n     */\n    AVFilterGraphInternal *internal;\n\n    /**\n     * Opaque user data. May be set by the caller to an arbitrary value, e.g. to\n     * be used from callbacks like @ref AVFilterGraph.execute.\n     * Libavfilter will not touch this field in any way.\n     */\n    void *opaque;\n\n    /**\n     * This callback may be set by the caller immediately after allocating the\n     * graph and before adding any filters to it, to provide a custom\n     * multithreading implementation.\n     *\n     * If set, filters with slice threading capability will call this callback\n     * to execute multiple jobs in parallel.\n     *\n     * If this field is left unset, libavfilter will use its internal\n     * implementation, which may or may not be multithreaded depending on the\n     * platform and build options.\n     */\n    avfilter_execute_func *execute;\n\n    char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters; access ONLY through AVOptions\n\n    /**\n     * Private fields\n     *\n     * The following fields are for internal use only.\n     * Their type, offset, number and semantics can change without notice.\n     */\n\n    AVFilterLink **sink_links;\n    int sink_links_count;\n\n    unsigned disable_auto_convert;\n} AVFilterGraph;\n\n/**\n * Allocate a filter graph.\n */\nAVFilterGraph *avfilter_graph_alloc(void);\n\n/**\n * Create a new filter instance in a filter graph.\n *\n * @param graph graph in which the new filter will be used\n * @param filter the filter to create an instance of\n * @param name Name to give to the new instance (will be copied to\n *             AVFilterContext.name). This may be used by the caller to identify\n *             different filters; libavfilter itself assigns no semantics to\n *             this parameter.
 May be NULL.\n *\n * @return the context of the newly created filter instance (note that it is\n *         also retrievable directly through AVFilterGraph.filters or with\n *         avfilter_graph_get_filter()) on success or NULL on failure.\n */\nAVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,\n                                             const AVFilter *filter,\n                                             const char *name);\n\n/**\n * Get a filter instance with name name from graph.\n *\n * @return the pointer to the found filter instance or NULL if it\n * cannot be found.\n */\nAVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);\n\n#if FF_API_AVFILTER_OPEN\n/**\n * Add an existing filter instance to a filter graph.\n *\n * @param graphctx  the filter graph\n * @param filter the filter to be added\n *\n * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a\n * filter graph\n */\nattribute_deprecated\nint avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);\n#endif\n\n/**\n * Create and add a filter instance into an existing graph.\n * The filter instance is created from the filter filt and initialized\n * with the parameters args and opaque.\n *\n * In case of success put in *filt_ctx the pointer to the created\n * filter instance; otherwise set *filt_ctx to NULL.\n *\n * @param name the instance name to give to the created filter instance\n * @param graph_ctx the filter graph\n * @return a negative AVERROR error code in case of failure, a non-negative\n * value otherwise\n */\nint avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,\n                                 const char *name, const char *args, void *opaque,\n                                 AVFilterGraph *graph_ctx);\n\n/**\n * Enable or disable automatic format conversion inside the graph.\n *\n * Note that format conversion can still happen inside explicitly inserted\n * scale and aresample filters.\n *\n * @param flags  any of the AVFILTER_AUTO_CONVERT_* constants\n */\nvoid avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);\n\nenum {\n    AVFILTER_AUTO_CONVERT_ALL  =  0, /**< all automatic conversions enabled */\n    AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */\n};\n\n/**\n * Check validity and configure all the links and formats in the graph.\n *\n * @param graphctx the filter graph\n * @param log_ctx context used for logging\n * @return >= 0 in case of success, a negative AVERROR code otherwise\n */\nint avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);\n\n/**\n * Free a graph, destroy its links, and set *graph to NULL.\n * If *graph is NULL, do nothing.\n
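 *\n * Typical lifecycle, as a hedged sketch (error checks abbreviated):\n * @code\n * AVFilterGraph *graph = avfilter_graph_alloc();\n * // ... create filters with avfilter_graph_create_filter(), connect them\n * // with avfilter_link(), validate with avfilter_graph_config() ...\n * avfilter_graph_free(&graph); // *graph is NULL afterwards\n * @endcode\n */\nvoid avfilter_graph_free(AVFilterGraph **graph);\n\n/**\n * A linked-list of the inputs/outputs of the filter chain.\n *\n * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),\n * where it is used to communicate open (unlinked) inputs and outputs from and\n * to the caller.\n * This struct specifies, for each unconnected pad contained in the graph, the\n * filter context and the pad index required for establishing a link.\n */\ntypedef struct AVFilterInOut {\n    /** unique name for this input/output in the list */\n    char *name;\n\n    /** filter context associated to this input/output */\n    AVFilterContext *filter_ctx;\n\n    /** index of the filt_ctx pad to use for linking */\n    int pad_idx;\n\n    /** next input/output in the list, NULL if this is the last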
 */\n    struct AVFilterInOut *next;\n} AVFilterInOut;\n\n/**\n * Allocate a single AVFilterInOut entry.\n * Must be freed with avfilter_inout_free().\n * @return allocated AVFilterInOut on success, NULL on failure.\n */\nAVFilterInOut *avfilter_inout_alloc(void);\n\n/**\n * Free the supplied list of AVFilterInOut and set *inout to NULL.\n * If *inout is NULL, do nothing.\n */\nvoid avfilter_inout_free(AVFilterInOut **inout);\n\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE\n/**\n * Add a graph described by a string to a graph.\n *\n * @note The caller must provide the lists of inputs and outputs,\n * which therefore must be known before calling the function.\n *\n * @note The inputs parameter describes inputs of the already existing\n * part of the graph; i.e. from the point of view of the newly created\n * part, they are outputs. Similarly the outputs parameter describes\n * outputs of the already existing filters, which are provided as\n * inputs to the parsed filters.\n *\n * @param graph   the filter graph where to link the parsed graph context\n * @param filters string to be parsed\n * @param inputs  linked list to the inputs of the graph\n * @param outputs linked list to the outputs of the graph\n * @return zero on success, a negative AVERROR code on error\n */\nint avfilter_graph_parse(AVFilterGraph *graph, const char *filters,\n                         AVFilterInOut *inputs, AVFilterInOut *outputs,\n                         void *log_ctx);\n#else\n/**\n * Add a graph described by a string to a graph.\n *\n * @param graph   the filter graph where to link the parsed graph context\n * @param filters string to be parsed\n * @param inputs  pointer to a linked list to the inputs of the graph, may be NULL.\n *                If non-NULL, *inputs is updated to contain the list of open inputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.\n *                If non-NULL, *outputs is updated to contain the list of open outputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @return non-negative on success, a negative AVERROR code on error\n * @deprecated Use avfilter_graph_parse_ptr() instead.\n */\nattribute_deprecated\nint avfilter_graph_parse(AVFilterGraph *graph, const char *filters,\n                         AVFilterInOut **inputs, AVFilterInOut **outputs,\n                         void *log_ctx);\n#endif\n\n/**\n * Add a graph described by a string to a graph.\n *\n * @param graph   the filter graph where to link the parsed graph context\n * @param filters string to be parsed\n * @param inputs  pointer to a linked list to the inputs of the graph, may be NULL.\n *                If non-NULL, *inputs is updated to contain the list of open inputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.\n *                If non-NULL, *outputs is updated to contain the list of open outputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @return non-negative on success, a negative AVERROR code on error\n */\nint avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,\n                             AVFilterInOut **inputs, AVFilterInOut **outputs,\n                             void *log_ctx);\n\n/**\n * Add a graph described by a string to a graph.\n *\n
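 * As a hedged illustration (the filter string is arbitrary and error\n * handling is compressed):\n * @code\n * AVFilterInOut *inputs = NULL, *outputs = NULL;\n * if (avfilter_graph_parse2(graph, \"scale=640:480\", &inputs, &outputs) < 0)\n *     abort();\n * // inputs and outputs now describe the unlinked pads of the parsed segment\n * avfilter_inout_free(&inputs);\n * avfilter_inout_free(&outputs);\n * @endcode\n *\n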
 * @param[in]  graph   the filter graph where to link the parsed graph context\n * @param[in]  filters string to be parsed\n * @param[out] inputs  a linked list of all free (unlinked) inputs of the\n *                     parsed graph will be returned here. It is to be freed\n *                     by the caller using avfilter_inout_free().\n * @param[out] outputs a linked list of all free (unlinked) outputs of the\n *                     parsed graph will be returned here. It is to be freed by the\n *                     caller using avfilter_inout_free().\n * @return zero on success, a negative AVERROR code on error\n *\n * @note This function returns the inputs and outputs that are left\n * unlinked after parsing the graph and the caller then deals with\n * them.\n * @note This function makes no reference whatsoever to already\n * existing parts of the graph and the inputs parameter will on return\n * contain inputs of the newly parsed part of the graph.  Analogously\n * the outputs parameter will contain outputs of the newly created\n * filters.\n */\nint avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,\n                          AVFilterInOut **inputs,\n                          AVFilterInOut **outputs);\n\n/**\n * Send a command to one or more filter instances.\n *\n * @param graph  the filter graph\n * @param target the filter(s) to which the command should be sent\n *               \"all\" sends to all filters\n *               otherwise it can be a filter or filter instance name\n *               which will send the command to all matching filters.\n * @param cmd    the command to send, for handling simplicity all commands must be alphanumeric only\n * @param arg    the argument for the command\n * @param res    a buffer with size res_len where the filter(s) can return a response.\n *\n * @returns >=0 on success otherwise an error code.\n *              AVERROR(ENOSYS) on unsupported commands\n */\nint avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);\n\n/**\n * Queue a command for one or more filter instances.\n *\n * @param graph  the filter graph\n * @param target the filter(s) to which the command should be sent\n *               \"all\" sends to all filters\n *               otherwise it can be a filter or filter instance name\n *               which will send the command to all matching filters.\n * @param cmd    the command to send, for handling simplicity all commands must be alphanumeric only\n * @param arg    the argument for the command\n * @param ts     time at which the command should be sent to the filter\n *\n * @note As this executes commands after this function returns, no return code\n *       from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.\n */\nint avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);\n\n\n/**\n * Dump a graph into a human-readable string representation.\n *\n * @param graph    the graph to dump\n * @param options  formatting options; currently ignored\n * @return  a string, or NULL in case of memory allocation failure;\n *          the string must be freed using av_free\n
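 *\n * For instance (a hedged sketch):\n * @code\n * char *dump = avfilter_graph_dump(graph, NULL);\n * if (dump) {\n *     av_log(NULL, AV_LOG_INFO, \"%s\\n\", dump);\n *     av_free(dump);\n * }\n * @endcode\n */\nchar *avfilter_graph_dump(AVFilterGraph *graph, const char *options);\n\n/**\n * Request a frame on the oldest sink link.\n *\n * If the request returns AVERROR_EOF, try the next.\n *\n * Note that this function is not meant to be the sole scheduling mechanism\n * of a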
 filtergraph, only a convenience function to help drain a filtergraph\n * in a balanced way under normal circumstances.\n *\n * Also note that AVERROR_EOF does not mean that frames did not arrive on\n * some of the sinks during the process.\n * When there are multiple sink links, in case the requested link\n * returns EOF, this may cause a filter to flush pending frames\n * which are sent to another sink link, although unrequested.\n *\n * @return  the return value of ff_request_frame(),\n *          or AVERROR_EOF if all links returned AVERROR_EOF\n */\nint avfilter_graph_request_oldest(AVFilterGraph *graph);\n\n/**\n * @}\n */\n\n#endif /* AVFILTER_AVFILTER_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavfilter/avfiltergraph.h",
    "content": "/*\n * Filter graphs\n * copyright (c) 2007 Bobby Bingham\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_AVFILTERGRAPH_H\n#define AVFILTER_AVFILTERGRAPH_H\n\n#include \"avfilter.h\"\n#include \"libavutil/log.h\"\n\n#endif /* AVFILTER_AVFILTERGRAPH_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavfilter/buffersink.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_BUFFERSINK_H\n#define AVFILTER_BUFFERSINK_H\n\n/**\n * @file\n * @ingroup lavfi_buffersink\n * memory buffer sink API for audio and video\n */\n\n#include \"avfilter.h\"\n\n/**\n * @defgroup lavfi_buffersink Buffer sink API\n * @ingroup lavfi\n * @{\n */\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Get an audio/video buffer data from buffer_sink and put it in bufref.\n *\n * This function works with both audio and video buffer sinks.\n *\n * @param buffer_sink pointer to a buffersink or abuffersink context\n * @param flags a combination of AV_BUFFERSINK_FLAG_* flags\n * @return >= 0 in case of success, a negative AVERROR code in case of\n * failure\n */\nattribute_deprecated\nint av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,\n                                 AVFilterBufferRef **bufref, int flags);\n\n/**\n * Get the number of immediately available frames.\n */\nattribute_deprecated\nint av_buffersink_poll_frame(AVFilterContext *ctx);\n\n/**\n * Get a buffer with filtered data from sink and put it in buf.\n *\n * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.\n * @param buf pointer to the buffer will be written here if buf is non-NULL. buf\n *            must be freed by the caller using avfilter_unref_buffer().\n *            Buf may also be NULL to query whether a buffer is ready to be\n *            output.\n *\n * @return >= 0 in case of success, a negative AVERROR code in case of\n *         failure.\n */\nattribute_deprecated\nint av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);\n\n/**\n * Same as av_buffersink_read, but with the ability to specify the number of\n * samples read. This function is less efficient than av_buffersink_read(),\n * because it copies the data around.\n *\n * @param ctx pointer to a context of the abuffersink AVFilter.\n * @param buf pointer to the buffer will be written here if buf is non-NULL. buf\n *            must be freed by the caller using avfilter_unref_buffer(). buf\n *            will contain exactly nb_samples audio samples, except at the end\n *            of stream, when it can contain less than nb_samples.\n *            Buf may also be NULL to query whether a buffer is ready to be\n *            output.\n *\n * @warning do not mix this function with av_buffersink_read(). 
Use only one or\n * the other with a single sink, not both.\n */\nattribute_deprecated\nint av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,\n                               int nb_samples);\n#endif\n\n/**\n * Get a frame with filtered data from sink and put it in frame.\n *\n * @param ctx    pointer to a buffersink or abuffersink filter context.\n * @param frame  pointer to an allocated frame that will be filled with data.\n *               The data must be freed using av_frame_unref() / av_frame_free()\n * @param flags  a combination of AV_BUFFERSINK_FLAG_* flags\n *\n * @return  >= 0 in case of success, a negative AVERROR code for failure.\n */\nint av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);\n\n/**\n * Tell av_buffersink_get_buffer_ref() to read video/samples buffer\n * reference, but not remove it from the buffer. This is useful if you\n * need only to read a video/samples buffer, without fetching it.\n */\n#define AV_BUFFERSINK_FLAG_PEEK 1\n\n/**\n * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.\n * If a frame is already buffered, it is read (and removed from the buffer),\n * but if no frame is present, return AVERROR(EAGAIN).\n */\n#define AV_BUFFERSINK_FLAG_NO_REQUEST 2\n\n/**\n * Struct to use for initializing a buffersink context.\n */\ntypedef struct {\n    const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE\n} AVBufferSinkParams;\n\n/**\n * Create an AVBufferSinkParams structure.\n *\n * Must be freed with av_free().\n */\nAVBufferSinkParams *av_buffersink_params_alloc(void);\n\n/**\n * Struct to use for initializing an abuffersink context.\n */\ntypedef struct {\n    const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE\n    const int64_t *channel_layouts;         ///< list of allowed channel layouts, terminated by -1\n    const int *channel_counts;              ///< list of allowed channel counts, terminated by -1\n    int all_channel_counts;                 ///< if not 0, accept any channel count or layout\n    int *sample_rates;                      ///< list of allowed sample rates, terminated by -1\n} AVABufferSinkParams;\n\n/**\n * Create an AVABufferSinkParams structure.\n *\n * Must be freed with av_free().\n */\nAVABufferSinkParams *av_abuffersink_params_alloc(void);\n\n/**\n * Set the frame size for an audio buffer sink.\n *\n * All calls to av_buffersink_get_buffer_ref will return a buffer with\n * exactly the specified number of samples, or AVERROR(EAGAIN) if there is\n * not enough.
 The last buffer at EOF will be padded with 0.\n */\nvoid av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);\n\n/**\n * Get the frame rate of the input.\n */\nAVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);\n\n/**\n * Get a frame with filtered data from sink and put it in frame.\n *\n * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.\n * @param frame pointer to an allocated frame that will be filled with data.\n *              The data must be freed using av_frame_unref() / av_frame_free()\n *\n * @return\n *         - >= 0 if a frame was successfully returned.\n *         - AVERROR(EAGAIN) if no frames are available at this point; more\n *           input frames must be added to the filtergraph to get more output.\n *         - AVERROR_EOF if there will be no more output frames on this sink.\n *         - A different negative AVERROR code in other failure cases.\n */\nint av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);\n\n/**\n * Same as av_buffersink_get_frame(), but with the ability to specify the number\n * of samples read. This function is less efficient than\n * av_buffersink_get_frame(), because it copies the data around.\n *\n * @param ctx pointer to a context of the abuffersink AVFilter.\n * @param frame pointer to an allocated frame that will be filled with data.\n *              The data must be freed using av_frame_unref() / av_frame_free()\n *              frame will contain exactly nb_samples audio samples, except at\n *              the end of stream, when it can contain fewer than nb_samples.\n *\n * @return The return codes have the same meaning as for\n *         av_buffersink_get_frame().\n *\n * @warning do not mix this function with av_buffersink_get_frame(). Use only one or\n * the other with a single sink, not both.\n
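 *\n * For example, a hedged pull-loop (sink_ctx is assumed to be an abuffersink\n * instance inside a configured graph):\n * @code\n * AVFrame *frame = av_frame_alloc();\n * while (av_buffersink_get_samples(sink_ctx, frame, 1024) >= 0) {\n *     // consume exactly 1024 samples (possibly fewer at EOF)\n *     av_frame_unref(frame);\n * }\n * av_frame_free(&frame);\n * @endcode\n */\nint av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);\n\n/**\n * @}\n */\n\n#endif /* AVFILTER_BUFFERSINK_H */\n"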
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavfilter/buffersrc.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_BUFFERSRC_H\n#define AVFILTER_BUFFERSRC_H\n\n/**\n * @file\n * @ingroup lavfi_buffersrc\n * Memory buffer source API.\n */\n\n#include \"libavcodec/avcodec.h\"\n#include \"avfilter.h\"\n\n/**\n * @defgroup lavfi_buffersrc Buffer source API\n * @ingroup lavfi\n * @{\n */\n\nenum {\n\n    /**\n     * Do not check for format changes.\n     */\n    AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,\n\n#if FF_API_AVFILTERBUFFER\n    /**\n     * Ignored\n     */\n    AV_BUFFERSRC_FLAG_NO_COPY = 2,\n#endif\n\n    /**\n     * Immediately push the frame to the output.\n     */\n    AV_BUFFERSRC_FLAG_PUSH = 4,\n\n    /**\n     * Keep a reference to the frame.\n     * If the frame if reference-counted, create a new reference; otherwise\n     * copy the frame data.\n     */\n    AV_BUFFERSRC_FLAG_KEEP_REF = 8,\n\n};\n\n/**\n * Add buffer data in picref to buffer_src.\n *\n * @param buffer_src  pointer to a buffer source context\n * @param picref      a buffer reference, or NULL to mark EOF\n * @param flags       a combination of AV_BUFFERSRC_FLAG_*\n * @return            >= 0 in case of success, a negative AVERROR code\n *                    in case of failure\n */\nint av_buffersrc_add_ref(AVFilterContext *buffer_src,\n                         AVFilterBufferRef *picref, int flags);\n\n/**\n * Get the number of failed requests.\n *\n * A failed request is when the request_frame method is called while no\n * frame is present in the buffer.\n * The number is reset when a frame is added.\n */\nunsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Add a buffer to a filtergraph.\n *\n * @param ctx an instance of the buffersrc filter\n * @param buf buffer containing frame data to be passed down the filtergraph.\n * This function will take ownership of buf, the user must not free it.\n * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.\n *\n * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()\n */\nattribute_deprecated\nint av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf);\n#endif\n\n/**\n * Add a frame to the buffer source.\n *\n * @param ctx   an instance of the buffersrc filter\n * @param frame frame to be added. If the frame is reference counted, this\n * function will make a new reference to it. 
 Otherwise the frame data will be\n * copied.\n *\n * @return 0 on success, a negative AVERROR on error\n *\n * This function is equivalent to av_buffersrc_add_frame_flags() with the\n * AV_BUFFERSRC_FLAG_KEEP_REF flag.\n */\nint av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);\n\n/**\n * Add a frame to the buffer source.\n *\n * @param ctx   an instance of the buffersrc filter\n * @param frame frame to be added. If the frame is reference counted, this\n * function will take ownership of the reference(s) and reset the frame.\n * Otherwise the frame data will be copied. If this function returns an error,\n * the input frame is not touched.\n *\n * @return 0 on success, a negative AVERROR on error.\n *\n * @note the difference between this function and av_buffersrc_write_frame() is\n * that av_buffersrc_write_frame() creates a new reference to the input frame,\n * while this function takes ownership of the reference passed to it.\n *\n * This function is equivalent to av_buffersrc_add_frame_flags() without the\n * AV_BUFFERSRC_FLAG_KEEP_REF flag.\n */\nint av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);\n\n/**\n * Add a frame to the buffer source.\n *\n * By default, if the frame is reference-counted, this function will take\n * ownership of the reference(s) and reset the frame. This can be controlled\n * using the flags.\n *\n * If this function returns an error, the input frame is not touched.\n *\n * @param buffer_src  pointer to a buffer source context\n * @param frame       a frame, or NULL to mark EOF\n * @param flags       a combination of AV_BUFFERSRC_FLAG_*\n * @return            >= 0 in case of success, a negative AVERROR code\n *                    in case of failure\n
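 *\n * E.g., a hedged sketch that feeds decoded frames and then signals EOF (the\n * decoding step itself is left abstract; src_ctx is an assumed buffersrc\n * instance):\n * @code\n * // for each decoded AVFrame *in:\n * if (av_buffersrc_add_frame_flags(src_ctx, in, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)\n *     abort();\n * // ... once the input is exhausted:\n * av_buffersrc_add_frame_flags(src_ctx, NULL, 0); // NULL marks EOF\n * @endcode\n */\nint av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,\n                                 AVFrame *frame, int flags);\n\n\n/**\n * @}\n */\n\n#endif /* AVFILTER_BUFFERSRC_H */\n"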
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavfilter/version.h",
    "content": "/*\n * Version macros.\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_VERSION_H\n#define AVFILTER_VERSION_H\n\n/**\n * @file\n * @ingroup lavfi\n * Libavfilter version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVFILTER_VERSION_MAJOR   4\n#define LIBAVFILTER_VERSION_MINOR   2\n#define LIBAVFILTER_VERSION_MICRO 100\n\n#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \\\n                                               LIBAVFILTER_VERSION_MINOR, \\\n                                               LIBAVFILTER_VERSION_MICRO)\n#define LIBAVFILTER_VERSION     AV_VERSION(LIBAVFILTER_VERSION_MAJOR,   \\\n                                           LIBAVFILTER_VERSION_MINOR,   \\\n                                           LIBAVFILTER_VERSION_MICRO)\n#define LIBAVFILTER_BUILD       LIBAVFILTER_VERSION_INT\n\n#define LIBAVFILTER_IDENT       \"Lavfi\" AV_STRINGIFY(LIBAVFILTER_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. 
The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#ifndef FF_API_AVFILTERPAD_PUBLIC\n#define FF_API_AVFILTERPAD_PUBLIC           (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_FOO_COUNT\n#define FF_API_FOO_COUNT                    (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_FILL_FRAME\n#define FF_API_FILL_FRAME                   (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_BUFFERSRC_BUFFER\n#define FF_API_BUFFERSRC_BUFFER             (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_AVFILTERBUFFER\n#define FF_API_AVFILTERBUFFER               (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_OLD_FILTER_OPTS\n#define FF_API_OLD_FILTER_OPTS              (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_ACONVERT_FILTER\n#define FF_API_ACONVERT_FILTER              (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_AVFILTER_OPEN\n#define FF_API_AVFILTER_OPEN                (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_AVFILTER_INIT_FILTER\n#define FF_API_AVFILTER_INIT_FILTER         (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_OLD_FILTER_REGISTER\n#define FF_API_OLD_FILTER_REGISTER          (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_OLD_GRAPH_PARSE\n#define FF_API_OLD_GRAPH_PARSE              (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_DRAWTEXT_OLD_TIMELINE\n#define FF_API_DRAWTEXT_OLD_TIMELINE        (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_NOCONST_GET_NAME\n#define FF_API_NOCONST_GET_NAME             (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_INTERLACE_LOWPASS_SET\n#define FF_API_INTERLACE_LOWPASS_SET        (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n\n#endif /* AVFILTER_VERSION_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavformat/avformat.h",
    "content": "/*\n * copyright (c) 2001 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFORMAT_AVFORMAT_H\n#define AVFORMAT_AVFORMAT_H\n\n/**\n * @file\n * @ingroup libavf\n * Main libavformat public API header\n */\n\n/**\n * @defgroup libavf I/O and Muxing/Demuxing Library\n * @{\n *\n * Libavformat (lavf) is a library for dealing with various media container\n * formats. Its main two purposes are demuxing - i.e. splitting a media file\n * into component streams, and the reverse process of muxing - writing supplied\n * data in a specified container format. It also has an @ref lavf_io\n * \"I/O module\" which supports a number of protocols for accessing the data (e.g.\n * file, tcp, http and others). Before using lavf, you need to call\n * av_register_all() to register all compiled muxers, demuxers and protocols.\n * Unless you are absolutely sure you won't use libavformat's network\n * capabilities, you should also call avformat_network_init().\n *\n * A supported input format is described by an AVInputFormat struct, conversely\n * an output format is described by AVOutputFormat. You can iterate over all\n * registered input/output formats using the av_iformat_next() /\n * av_oformat_next() functions. The protocols layer is not part of the public\n * API, so you can only get the names of supported protocols with the\n * avio_enum_protocols() function.\n *\n * Main lavf structure used for both muxing and demuxing is AVFormatContext,\n * which exports all information about the file being read or written. As with\n * most Libavformat structures, its size is not part of public ABI, so it cannot be\n * allocated on stack or directly with av_malloc(). To create an\n * AVFormatContext, use avformat_alloc_context() (some functions, like\n * avformat_open_input() might do that for you).\n *\n * Most importantly an AVFormatContext contains:\n * @li the @ref AVFormatContext.iformat \"input\" or @ref AVFormatContext.oformat\n * \"output\" format. It is either autodetected or set by user for input;\n * always set by user for output.\n * @li an @ref AVFormatContext.streams \"array\" of AVStreams, which describe all\n * elementary streams stored in the file. AVStreams are typically referred to\n * using their index in this array.\n * @li an @ref AVFormatContext.pb \"I/O context\". It is either opened by lavf or\n * set by user for input, always set by user for output (unless you are dealing\n * with an AVFMT_NOFILE format).\n *\n * @section lavf_options Passing options to (de)muxers\n * Lavf allows to configure muxers and demuxers using the @ref avoptions\n * mechanism. 
Generic (format-independent) libavformat options are provided by\n * AVFormatContext, they can be examined from a user program by calling\n * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass\n * from avformat_get_class()). Private (format-specific) options are provided by\n * AVFormatContext.priv_data if and only if AVInputFormat.priv_class /\n * AVOutputFormat.priv_class of the corresponding format struct is non-NULL.\n * Further options may be provided by the @ref AVFormatContext.pb \"I/O context\",\n * if its AVClass is non-NULL, and the protocols layer. See the discussion on\n * nesting in @ref avoptions documentation to learn how to access those.\n *\n * @defgroup lavf_decoding Demuxing\n * @{\n * Demuxers read a media file and split it into chunks of data (@em packets). A\n * @ref AVPacket \"packet\" contains one or more encoded frames which belongs to a\n * single elementary stream. In the lavf API this process is represented by the\n * avformat_open_input() function for opening a file, av_read_frame() for\n * reading a single packet and finally avformat_close_input(), which does the\n * cleanup.\n *\n * @section lavf_decoding_open Opening a media file\n * The minimum information required to open a file is its URL or filename, which\n * is passed to avformat_open_input(), as in the following code:\n * @code\n * const char    *url = \"in.mp3\";\n * AVFormatContext *s = NULL;\n * int ret = avformat_open_input(&s, url, NULL, NULL);\n * if (ret < 0)\n *     abort();\n * @endcode\n * The above code attempts to allocate an AVFormatContext, open the\n * specified file (autodetecting the format) and read the header, exporting the\n * information stored there into s. Some formats do not have a header or do not\n * store enough information there, so it is recommended that you call the\n * avformat_find_stream_info() function which tries to read and decode a few\n * frames to find missing information.\n *\n * In some cases you might want to preallocate an AVFormatContext yourself with\n * avformat_alloc_context() and do some tweaking on it before passing it to\n * avformat_open_input(). One such case is when you want to use custom functions\n * for reading input data instead of lavf internal I/O layer.\n * To do that, create your own AVIOContext with avio_alloc_context(), passing\n * your reading callbacks to it. Then set the @em pb field of your\n * AVFormatContext to newly created AVIOContext.\n *\n * Since the format of the opened file is in general not known until after\n * avformat_open_input() has returned, it is not possible to set demuxer private\n * options on a preallocated context. Instead, the options should be passed to\n * avformat_open_input() wrapped in an AVDictionary:\n * @code\n * AVDictionary *options = NULL;\n * av_dict_set(&options, \"video_size\", \"640x480\", 0);\n * av_dict_set(&options, \"pixel_format\", \"rgb24\", 0);\n *\n * if (avformat_open_input(&s, url, NULL, &options) < 0)\n *     abort();\n * av_dict_free(&options);\n * @endcode\n * This code passes the private options 'video_size' and 'pixel_format' to the\n * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it\n * cannot know how to interpret raw video data otherwise. If the format turns\n * out to be something different than raw video, those options will not be\n * recognized by the demuxer and therefore will not be applied. Such unrecognized\n * options are then returned in the options dictionary (recognized options are\n * consumed). 
The calling program can handle such unrecognized options as it\n * wishes, e.g.\n * @code\n * AVDictionaryEntry *e;\n * if ((e = av_dict_get(options, \"\", NULL, AV_DICT_IGNORE_SUFFIX))) {\n *     fprintf(stderr, \"Option %s not recognized by the demuxer.\\n\", e->key);\n *     abort();\n * }\n * @endcode\n *\n * After you have finished reading the file, you must close it with\n * avformat_close_input(). It will free everything associated with the file.\n *\n * @section lavf_decoding_read Reading from an opened file\n * Reading data from an opened AVFormatContext is done by repeatedly calling\n * av_read_frame() on it. Each call, if successful, will return an AVPacket\n * containing encoded data for one AVStream, identified by\n * AVPacket.stream_index. This packet may be passed straight into the libavcodec\n * decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or\n * avcodec_decode_subtitle2() if the caller wishes to decode the data.\n *\n * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be\n * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for\n * pts/dts, 0 for duration) if the stream does not provide them. The timing\n * information will be in AVStream.time_base units, i.e. it has to be\n * multiplied by the timebase to convert it to seconds.\n *\n * If AVPacket.buf is set on the returned packet, then the packet is\n * allocated dynamically and the user may keep it indefinitely.\n * Otherwise, if AVPacket.buf is NULL, the packet data is backed by\n * static storage somewhere inside the demuxer and the packet is only valid\n * until the next av_read_frame() call or closing the file. If the caller\n * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy\n * of it.\n * In both cases, the packet must be freed with av_free_packet() when it is no\n * longer needed.\n *\n * @section lavf_decoding_seek Seeking\n * @}\n *\n * @defgroup lavf_encoding Muxing\n * @{\n * Muxers take encoded data in the form of @ref AVPacket \"AVPackets\" and write\n * it into files or other output bytestreams in the specified container format.\n *\n * The main API functions for muxing are avformat_write_header() for writing the\n * file header, av_write_frame() / av_interleaved_write_frame() for writing the\n * packets, and av_write_trailer() for finalizing the file.\n *\n * At the beginning of the muxing process, the caller must first call\n * avformat_alloc_context() to create a muxing context. The caller then sets up\n * the muxer by filling the various fields in this context:\n *\n * - The @ref AVFormatContext.oformat \"oformat\" field must be set to select the\n *   muxer that will be used.\n * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb\n *   \"pb\" field must be set to an opened IO context, either returned from\n *   avio_open2() or a custom one.\n * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must\n *   be created with the avformat_new_stream() function. The caller should fill\n *   the @ref AVStream.codec \"stream codec context\" information, such as the\n *   codec @ref AVCodecContext.codec_type \"type\", @ref AVCodecContext.codec_id\n *   \"id\" and other parameters (e.g. width / height, the pixel or sample format,\n *   etc.) as known. 
The @ref AVCodecContext.time_base \"codec timebase\" should\n *   be set to the timebase that the caller desires to use for this stream (note\n *   that the timebase actually used by the muxer can be different, as will be\n *   described later).\n * - The caller may fill in additional information, such as @ref\n *   AVFormatContext.metadata \"global\" or @ref AVStream.metadata \"per-stream\"\n *   metadata, @ref AVFormatContext.chapters \"chapters\", @ref\n *   AVFormatContext.programs \"programs\", etc. as described in the\n *   AVFormatContext documentation. Whether such information will actually be\n *   stored in the output depends on what the container format and the muxer\n *   support.\n *\n * When the muxing context is fully set up, the caller must call\n * avformat_write_header() to initialize the muxer internals and write the file\n * header. Whether anything actually is written to the IO context at this step\n * depends on the muxer, but this function must always be called. Any muxer\n * private options must be passed in the options parameter to this function.\n *\n * The data is then sent to the muxer by repeatedly calling av_write_frame() or\n * av_interleaved_write_frame() (consult those functions' documentation for\n * discussion on the difference between them; only one of them may be used with\n * a single muxing context, they should not be mixed). Do note that the timing\n * information on the packets sent to the muxer must be in the corresponding\n * AVStream's timebase. That timebase is set by the muxer (in the\n * avformat_write_header() step) and may be different from the timebase the\n * caller set on the codec context.\n *\n * Once all the data has been written, the caller must call av_write_trailer()\n * to flush any buffered packets and finalize the output file, then close the IO\n * context (if any) and finally free the muxing context with\n * avformat_free_context().\n * @}\n *\n * @defgroup lavf_io I/O Read/Write\n * @{\n * @}\n *\n * @defgroup lavf_codec Demuxers\n * @{\n * @defgroup lavf_codec_native Native Demuxers\n * @{\n * @}\n * @defgroup lavf_codec_wrappers External library wrappers\n * @{\n * @}\n * @}\n * @defgroup lavf_protos I/O Protocols\n * @{\n * @}\n * @defgroup lavf_internal Internal\n * @{\n * @}\n * @}\n *\n */\n\n#include <time.h>\n#include <stdio.h>  /* FILE */\n#include \"libavcodec/avcodec.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/log.h\"\n\n#include \"avio.h\"\n#include \"libavformat/version.h\"\n\nstruct AVFormatContext;\n\nstruct AVDeviceInfoList;\n\n/**\n * @defgroup metadata_api Public Metadata API\n * @{\n * @ingroup libavf\n * The metadata API allows libavformat to export metadata tags to a client\n * application when demuxing. Conversely it allows a client application to\n * set metadata when muxing.\n *\n * Metadata is exported or set as pairs of key/value strings in the 'metadata'\n * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs\n * using the @ref lavu_dict \"AVDictionary\" API. Like all strings in FFmpeg,\n * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata\n * exported by demuxers isn't checked to be valid UTF-8 in most cases.\n *\n * Important concepts to keep in mind:\n * -  Keys are unique; there can never be 2 tags with the same key. This is\n *    also meant semantically, i.e., a demuxer should not knowingly produce\n *    several keys that are literally different but semantically identical.\n *    E.g., key=Author5, key=Author6. 
In this example, all authors must be\n *    placed in the same tag.\n * -  Metadata is flat, not hierarchical; there are no subtags. If you\n *    want to store, e.g., the email address of the child of producer Alice\n *    and actor Bob, that could have key=alice_and_bobs_childs_email_address.\n * -  Several modifiers can be applied to the tag name. This is done by\n *    appending a dash character ('-') and the modifier name in the order\n *    they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng.\n *    -  language -- a tag whose value is localized for a particular language\n *       is appended with the ISO 639-2/B 3-letter language code.\n *       For example: Author-ger=Michael, Author-eng=Mike\n *       The original/default language is in the unqualified \"Author\" tag.\n *       A demuxer should set a default if it sets any translated tag.\n *    -  sorting  -- a modified version of a tag that should be used for\n *       sorting will have '-sort' appended. E.g. artist=\"The Beatles\",\n *       artist-sort=\"Beatles, The\".\n *\n * -  Demuxers attempt to export metadata in a generic format; however, tags\n *    with no generic equivalents are left as they are stored in the container.\n *    A list of generic tag names follows:\n *\n @verbatim\n album        -- name of the set this work belongs to\n album_artist -- main creator of the set/album, if different from artist.\n                 e.g. \"Various Artists\" for compilation albums.\n artist       -- main creator of the work\n comment      -- any additional description of the file.\n composer     -- who composed the work, if different from artist.\n copyright    -- name of copyright holder.\n creation_time-- date when the file was created, preferably in ISO 8601.\n date         -- date when the work was created, preferably in ISO 8601.\n disc         -- number of a subset, e.g. disc in a multi-disc collection.\n encoder      -- name/settings of the software/hardware that produced the file.\n encoded_by   -- person/group who created the file.\n filename     -- original name of the file.\n genre        -- <self-evident>.\n language     -- main language in which the work is performed, preferably\n                 in ISO 639-2 format. 
Multiple languages can be specified by\n                 separating them with commas.\n performer    -- artist who performed the work, if different from artist.\n                 E.g. for \"Also sprach Zarathustra\", artist would be \"Richard\n                 Strauss\" and performer \"London Philharmonic Orchestra\".\n publisher    -- name of the label/publisher.\n service_name     -- name of the service in broadcasting (channel name).\n service_provider -- name of the service provider in broadcasting.\n title        -- name of the work.\n track        -- number of this work in the set, can be in form current/total.\n variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of\n @endverbatim\n *\n * Look in the examples section for an application example of how to use the Metadata API.\n *\n * @}\n */\n\n/* packet functions */\n\n\n/**\n * Allocate and read the payload of a packet and initialize its\n * fields with default values.\n *\n * @param s    associated IO context\n * @param pkt packet\n * @param size desired payload size\n * @return >0 (read size) if OK, AVERROR_xxx otherwise\n */\nint av_get_packet(AVIOContext *s, AVPacket *pkt, int size);\n\n\n/**\n * Read data and append it to the current content of the AVPacket.\n * If pkt->size is 0, this is identical to av_get_packet.\n * Note that this uses av_grow_packet and thus involves a realloc\n * which is inefficient. Thus this function should only be used\n * when there is no reasonable way to know (an upper bound of)\n * the final size.\n *\n * @param s    associated IO context\n * @param pkt packet\n * @param size amount of data to read\n * @return >0 (read size) if OK, AVERROR_xxx otherwise; previous data\n *         will not be lost even if an error occurs.\n */\nint av_append_packet(AVIOContext *s, AVPacket *pkt, int size);\n\n/*************************************************/\n/* fractional numbers for exact pts handling */\n\n/**\n * The exact value of the fractional number is: 'val + num / den'.\n * num is assumed to be 0 <= num < den.\n */\ntypedef struct AVFrac {\n    int64_t val, num, den;\n} AVFrac;\n\n/*************************************************/\n/* input/output formats */\n\nstruct AVCodecTag;\n\n/**\n * This structure contains the data a format has to probe a file.\n */\ntypedef struct AVProbeData {\n    const char *filename;\n    unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */\n    int buf_size;       /**< Size of buf except extra allocated bytes */\n} AVProbeData;\n\n#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4)\n#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1)\n\n#define AVPROBE_SCORE_EXTENSION  50 ///< score for file extension\n#define AVPROBE_SCORE_MAX       100 ///< maximum score\n\n#define AVPROBE_PADDING_SIZE 32             ///< extra allocated bytes at the end of the probe buffer\n\n/// Demuxer will use avio_open, no opened file should be provided by the caller.\n#define AVFMT_NOFILE        0x0001\n#define AVFMT_NEEDNUMBER    0x0002 /**< Needs '%d' in filename. */\n#define AVFMT_SHOW_IDS      0x0008 /**< Show format stream ID numbers. */\n#define AVFMT_RAWPICTURE    0x0020 /**< Format wants AVPicture structure for\n                                      raw picture data. */\n#define AVFMT_GLOBALHEADER  0x0040 /**< Format wants global header. */\n#define AVFMT_NOTIMESTAMPS  0x0080 /**< Format does not need / have any timestamps. 
*/\n#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */\n#define AVFMT_TS_DISCONT    0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotonic) timestamps */\n#define AVFMT_VARIABLE_FPS  0x0400 /**< Format allows variable fps. */\n#define AVFMT_NODIMENSIONS  0x0800 /**< Format does not need width/height */\n#define AVFMT_NOSTREAMS     0x1000 /**< Format does not require any streams */\n#define AVFMT_NOBINSEARCH   0x2000 /**< Format does not allow falling back to binary search via read_timestamp */\n#define AVFMT_NOGENSEARCH   0x4000 /**< Format does not allow falling back to generic search */\n#define AVFMT_NO_BYTE_SEEK  0x8000 /**< Format does not allow seeking by bytes */\n#define AVFMT_ALLOW_FLUSH  0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */\n#if LIBAVFORMAT_VERSION_MAJOR <= 54\n#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible with the ABIs of ffmpeg and major forks\n#else\n#define AVFMT_TS_NONSTRICT 0x20000\n#endif\n                                   /**< Format does not require strictly\n                                        increasing timestamps, but they must\n                                        still be monotonic */\n#define AVFMT_TS_NEGATIVE  0x40000 /**< Format allows muxing negative\n                                        timestamps. If not set, the timestamp\n                                        will be shifted in av_write_frame and\n                                        av_interleaved_write_frame so they\n                                        start from 0.\n                                        The user or muxer can override this through\n                                        AVFormatContext.avoid_negative_ts\n                                        */\n\n#define AVFMT_SEEK_TO_PTS   0x4000000 /**< Seeking is based on PTS */\n\n/**\n * @addtogroup lavf_encoding\n * @{\n */\ntypedef struct AVOutputFormat {\n    const char *name;\n    /**\n     * Descriptive name for the format, meant to be more human-readable\n     * than name. You should use the NULL_IF_CONFIG_SMALL() macro\n     * to define it.\n     */\n    const char *long_name;\n    const char *mime_type;\n    const char *extensions; /**< comma-separated filename extensions */\n    /* output support */\n    enum AVCodecID audio_codec;    /**< default audio codec */\n    enum AVCodecID video_codec;    /**< default video codec */\n    enum AVCodecID subtitle_codec; /**< default subtitle codec */\n    /**\n     * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE,\n     * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS,\n     * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH,\n     * AVFMT_TS_NONSTRICT\n     */\n    int flags;\n\n    /**\n     * List of supported codec_id-codec_tag pairs, ordered by \"better\n     * choice first\". The arrays are all terminated by AV_CODEC_ID_NONE.\n     */\n    const struct AVCodecTag * const *codec_tag;\n\n\n    const AVClass *priv_class; ///< AVClass for the private context\n\n    /*****************************************************************\n     * No fields below this line are part of the public API. 
They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    struct AVOutputFormat *next;\n    /**\n     * size of private data so that it can be allocated in the wrapper\n     */\n    int priv_data_size;\n\n    int (*write_header)(struct AVFormatContext *);\n    /**\n     * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags,\n     * pkt can be NULL in order to flush data buffered in the muxer.\n     * When flushing, return 0 if there is still more data to flush,\n     * or 1 if everything was flushed and there is no more buffered\n     * data.\n     */\n    int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);\n    int (*write_trailer)(struct AVFormatContext *);\n    /**\n     * Currently only used to set pixel format if not YUV420P.\n     */\n    int (*interleave_packet)(struct AVFormatContext *, AVPacket *out,\n                             AVPacket *in, int flush);\n    /**\n     * Test if the given codec can be stored in this container.\n     *\n     * @return 1 if the codec is supported, 0 if it is not.\n     *         A negative number if unknown.\n     *         MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC\n     */\n    int (*query_codec)(enum AVCodecID id, int std_compliance);\n\n    void (*get_output_timestamp)(struct AVFormatContext *s, int stream,\n                                 int64_t *dts, int64_t *wall);\n    /**\n     * Allows sending messages from application to device.\n     */\n    int (*control_message)(struct AVFormatContext *s, int type,\n                           void *data, size_t data_size);\n\n    /**\n     * Write an uncoded AVFrame.\n     *\n     * See av_write_uncoded_frame() for details.\n     *\n     * The library will free *frame afterwards, but the muxer can prevent it\n     * by setting the pointer to NULL.\n     */\n    int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index,\n                               AVFrame **frame, unsigned flags);\n    /**\n     * Returns a device list with its properties.\n     * @see avdevice_list_devices() for more details.\n     */\n    int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);\n} AVOutputFormat;\n/**\n * @}\n */\n\n/**\n * @addtogroup lavf_decoding\n * @{\n */\ntypedef struct AVInputFormat {\n    /**\n     * A comma-separated list of short names for the format. New names\n     * may be appended with a minor bump.\n     */\n    const char *name;\n\n    /**\n     * Descriptive name for the format, meant to be more human-readable\n     * than name. You should use the NULL_IF_CONFIG_SMALL() macro\n     * to define it.\n     */\n    const char *long_name;\n\n    /**\n     * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS,\n     * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,\n     * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.\n     */\n    int flags;\n\n    /**\n     * If extensions are defined, then no probe is done. 
You should\n     * usually not use extension format guessing because it is not\n     * reliable enough\n     */\n    const char *extensions;\n\n    const struct AVCodecTag * const *codec_tag;\n\n    const AVClass *priv_class; ///< AVClass for the private context\n\n    /*****************************************************************\n     * No fields below this line are part of the public API. They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    struct AVInputFormat *next;\n\n    /**\n     * Raw demuxers store their codec ID here.\n     */\n    int raw_codec_id;\n\n    /**\n     * Size of private data so that it can be allocated in the wrapper.\n     */\n    int priv_data_size;\n\n    /**\n     * Tell if a given file has a chance of being parsed as this format.\n     * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes\n     * big so you do not have to check for that unless you need more.\n     */\n    int (*read_probe)(AVProbeData *);\n\n    /**\n     * Read the format header and initialize the AVFormatContext\n     * structure. Return 0 if OK. Only used in raw format right\n     * now. 'avformat_new_stream' should be called to create new streams.\n     */\n    int (*read_header)(struct AVFormatContext *);\n\n    /**\n     * Read one packet and put it in 'pkt'. pts and flags are also\n     * set. 'avformat_new_stream' can be called only if the flag\n     * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a\n     * background thread).\n     * @return 0 on success, < 0 on error.\n     *         When returning an error, pkt must not have been allocated\n     *         or must be freed before returning\n     */\n    int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);\n\n    /**\n     * Close the stream. 
The AVFormatContext and AVStreams are not\n     * freed by this function.\n     */\n    int (*read_close)(struct AVFormatContext *);\n\n    /**\n     * Seek to a given timestamp relative to the frames in\n     * stream component stream_index.\n     * @param stream_index Must not be -1.\n     * @param flags Selects which direction should be preferred if no exact\n     *              match is available.\n     * @return >= 0 on success (but not necessarily the new offset)\n     */\n    int (*read_seek)(struct AVFormatContext *,\n                     int stream_index, int64_t timestamp, int flags);\n\n    /**\n     * Get the next timestamp in stream[stream_index].time_base units.\n     * @return the timestamp or AV_NOPTS_VALUE if an error occurred\n     */\n    int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,\n                              int64_t *pos, int64_t pos_limit);\n\n    /**\n     * Start/resume playing - only meaningful if using a network-based format\n     * (RTSP).\n     */\n    int (*read_play)(struct AVFormatContext *);\n\n    /**\n     * Pause playing - only meaningful if using a network-based format\n     * (RTSP).\n     */\n    int (*read_pause)(struct AVFormatContext *);\n\n    /**\n     * Seek to timestamp ts.\n     * Seeking will be done so that the point from which all active streams\n     * can be presented successfully will be closest to ts and within min/max_ts.\n     * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.\n     */\n    int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);\n\n    /**\n     * Returns a device list with its properties.\n     * @see avdevice_list_devices() for more details.\n     */\n    int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);\n} AVInputFormat;\n/**\n * @}\n */\n\nenum AVStreamParseType {\n    AVSTREAM_PARSE_NONE,\n    AVSTREAM_PARSE_FULL,       /**< full parsing and repack */\n    AVSTREAM_PARSE_HEADERS,    /**< Only parse headers, do not repack. */\n    AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */\n    AVSTREAM_PARSE_FULL_ONCE,  /**< full parsing and repack of the first frame only, only implemented for H.264 currently */\n    AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'),       /**< full parsing and repack with timestamp and position generation by parser for raw\n                                                             this assumes that each packet in the file contains no demuxer level headers and\n                                                             just codec level data, otherwise position generation would fail */\n};\n\ntypedef struct AVIndexEntry {\n    int64_t pos;\n    int64_t timestamp;        /**<\n                               * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available\n                               * when seeking to this entry. That means preferably the PTS on keyframe-based formats.\n                               * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better\n                               * is known.\n                               */\n#define AVINDEX_KEYFRAME 0x0001\n    int flags:2;\n    int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 
32 bytes due to possible 8-byte alignment).\n    int min_distance;         /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */\n} AVIndexEntry;\n\n#define AV_DISPOSITION_DEFAULT   0x0001\n#define AV_DISPOSITION_DUB       0x0002\n#define AV_DISPOSITION_ORIGINAL  0x0004\n#define AV_DISPOSITION_COMMENT   0x0008\n#define AV_DISPOSITION_LYRICS    0x0010\n#define AV_DISPOSITION_KARAOKE   0x0020\n\n/**\n * Track should be used during playback by default.\n * Useful for a subtitle track that should be displayed\n * even when the user did not explicitly ask for subtitles.\n */\n#define AV_DISPOSITION_FORCED    0x0040\n#define AV_DISPOSITION_HEARING_IMPAIRED  0x0080  /**< stream for hearing impaired audiences */\n#define AV_DISPOSITION_VISUAL_IMPAIRED   0x0100  /**< stream for visually impaired audiences */\n#define AV_DISPOSITION_CLEAN_EFFECTS     0x0200  /**< stream without voice */\n/**\n * The stream is stored in the file as an attached picture/\"cover art\" (e.g.\n * APIC frame in ID3v2). The single packet associated with it will be returned\n * among the first few packets read from the file unless seeking takes place.\n * It can also be accessed at any time in AVStream.attached_pic.\n */\n#define AV_DISPOSITION_ATTACHED_PIC      0x0400\n\n/**\n * To specify text track kind (different from subtitles default).\n */\n#define AV_DISPOSITION_CAPTIONS     0x10000\n#define AV_DISPOSITION_DESCRIPTIONS 0x20000\n#define AV_DISPOSITION_METADATA     0x40000\n\n/**\n * Options for behavior on timestamp wrap detection.\n */\n#define AV_PTS_WRAP_IGNORE      0   ///< ignore the wrap\n#define AV_PTS_WRAP_ADD_OFFSET  1   ///< add the format specific offset on wrap detection\n#define AV_PTS_WRAP_SUB_OFFSET  -1  ///< subtract the format specific offset on wrap detection\n\n/**\n * Stream structure.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVStream) must not be used outside libav*.\n */\ntypedef struct AVStream {\n    int index;    /**< stream index in AVFormatContext */\n    /**\n     * Format-specific stream ID.\n     * decoding: set by libavformat\n     * encoding: set by the user, replaced by libavformat if left unset\n     */\n    int id;\n    /**\n     * Codec context associated with this stream. Allocated and freed by\n     * libavformat.\n     *\n     * - decoding: The demuxer exports codec information stored in the headers\n     *             here.\n     * - encoding: The user sets codec information, the muxer writes it to the\n     *             output. Mandatory fields as specified in AVCodecContext\n     *             documentation must be set even if this AVCodecContext is\n     *             not actually used for encoding.\n     */\n    AVCodecContext *codec;\n    void *priv_data;\n\n    /**\n     * encoding: pts generation when outputting stream\n     */\n    struct AVFrac pts;\n\n    /**\n     * This is the fundamental unit of time (in seconds) in terms\n     * of which frame timestamps are represented.\n     *\n     * decoding: set by libavformat\n     * encoding: set by libavformat in avformat_write_header. 
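For\n     * example, a muxing application that wants 25 fps video timestamps\n     * might hint this before writing the header (an illustrative sketch;\n     * st is assumed to be an AVStream returned by avformat_new_stream()):\n     * @code\n     * st->codec->time_base = (AVRational){1, 25};\n     * @endcode\n     * 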
The muxer may use the\n     * user-provided value of @ref AVCodecContext.time_base \"codec->time_base\"\n     * as a hint.\n     */\n    AVRational time_base;\n\n    /**\n     * Decoding: pts of the first frame of the stream in presentation order, in stream time base.\n     * Only set this if you are absolutely 100% sure that the value you set\n     * it to really is the pts of the first frame.\n     * This may be undefined (AV_NOPTS_VALUE).\n     * @note The ASF header does NOT contain a correct start_time; the ASF\n     * demuxer must NOT set this.\n     */\n    int64_t start_time;\n\n    /**\n     * Decoding: duration of the stream, in stream time base.\n     * If a source file does not specify a duration, but does specify\n     * a bitrate, this value will be estimated from bitrate and file size.\n     */\n    int64_t duration;\n\n    int64_t nb_frames;                 ///< number of frames in this stream if known or 0\n\n    int disposition; /**< AV_DISPOSITION_* bit field */\n\n    enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed.\n\n    /**\n     * sample aspect ratio (0 if unknown)\n     * - encoding: Set by user.\n     * - decoding: Set by libavformat.\n     */\n    AVRational sample_aspect_ratio;\n\n    AVDictionary *metadata;\n\n    /**\n     * Average framerate\n     */\n    AVRational avg_frame_rate;\n\n    /**\n     * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet\n     * will contain the attached picture.\n     *\n     * decoding: set by libavformat, must not be modified by the caller.\n     * encoding: unused\n     */\n    AVPacket attached_pic;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n\n    /**\n     * Stream information used internally by av_find_stream_info()\n     */\n#define MAX_STD_TIMEBASES (60*12+6)\n    struct {\n        int64_t last_dts;\n        int64_t duration_gcd;\n        int duration_count;\n        int64_t rfps_duration_sum;\n        double (*duration_error)[2][MAX_STD_TIMEBASES];\n        int64_t codec_info_duration;\n        int64_t codec_info_duration_fields;\n\n        /**\n         * 0  -> decoder has not been searched for yet.\n         * >0 -> decoder found\n         * <0 -> decoder with codec_id == -found_decoder has not been found\n         */\n        int found_decoder;\n\n        int64_t last_duration;\n\n        /**\n         * These are used for average framerate estimation.\n         */\n        int64_t fps_first_dts;\n        int     fps_first_dts_idx;\n        int64_t fps_last_dts;\n        int     fps_last_dts_idx;\n\n    } *info;\n\n    int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */\n\n#if FF_API_REFERENCE_DTS\n    /* a hack to keep ABI compatibility for ffmpeg and other applications, which access the parser even\n     * though they should not */\n    int64_t do_not_use;\n#endif\n    // Timestamp generation support:\n    /**\n     * Timestamp corresponding to the last dts sync point.\n     *\n     * Initialized when AVCodecParserContext.dts_sync_point >= 0 and\n     * a DTS is received from the underlying container. 
Otherwise set to\n     * AV_NOPTS_VALUE by default.\n     */\n    int64_t first_dts;\n    int64_t cur_dts;\n    int64_t last_IP_pts;\n    int last_IP_duration;\n\n    /**\n     * Number of packets to buffer for codec probing\n     */\n#define MAX_PROBE_PACKETS 2500\n    int probe_packets;\n\n    /**\n     * Number of frames that have been demuxed during av_find_stream_info()\n     */\n    int codec_info_nb_frames;\n\n    /* av_read_frame() support */\n    enum AVStreamParseType need_parsing;\n    struct AVCodecParserContext *parser;\n\n    /**\n     * last packet in packet_buffer for this stream when muxing.\n     */\n    struct AVPacketList *last_in_packet_buffer;\n    AVProbeData probe_data;\n#define MAX_REORDER_DELAY 16\n    int64_t pts_buffer[MAX_REORDER_DELAY+1];\n\n    AVIndexEntry *index_entries; /**< Only used if the format does not\n                                    support seeking natively. */\n    int nb_index_entries;\n    unsigned int index_entries_allocated_size;\n\n    /**\n     * Real base framerate of the stream.\n     * This is the lowest framerate with which all timestamps can be\n     * represented accurately (it is the least common multiple of all\n     * framerates in the stream). Note, this value is just a guess!\n     * For example, if the time base is 1/90000 and all frames have either\n     * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1.\n     *\n     * Code outside avformat should access this field using:\n     * av_stream_get/set_r_frame_rate(stream)\n     */\n    AVRational r_frame_rate;\n\n    /**\n     * Stream Identifier\n     * This is the MPEG-TS stream identifier +1\n     * 0 means unknown\n     */\n    int stream_identifier;\n\n    int64_t interleaver_chunk_size;\n    int64_t interleaver_chunk_duration;\n\n    /**\n     * stream probing state\n     * -1   -> probing finished\n     *  0   -> no probing requested\n     * rest -> perform probing with request_probe being the minimum score to accept.\n     * NOT PART OF PUBLIC API\n     */\n    int request_probe;\n    /**\n     * Indicates that everything up to the next keyframe\n     * should be discarded.\n     */\n    int skip_to_keyframe;\n\n    /**\n     * Number of samples to skip at the start of the frame decoded from the next packet.\n     */\n    int skip_samples;\n\n    /**\n     * Number of internally decoded frames, used internally in libavformat; do not access.\n     * Its lifetime differs from info, which is why it is not in that structure.\n     */\n    int nb_decoded_frames;\n\n    /**\n     * Timestamp offset added to timestamps before muxing\n     * NOT PART OF PUBLIC API\n     */\n    int64_t mux_ts_offset;\n\n    /**\n     * Internal data to check for wrapping of the time stamp\n     */\n    int64_t pts_wrap_reference;\n\n    /**\n     * Options for behavior when a wrap is detected.\n     *\n     * Defined by AV_PTS_WRAP_ values.\n     *\n     * If correction is enabled, there are two possibilities:\n     * If the first time stamp is near the wrap point, the wrap offset\n     * will be subtracted, which will create negative time stamps.\n     * Otherwise the offset will be added.\n     */\n    int pts_wrap_behavior;\n\n    /**\n     * Internal data to prevent doing update_initial_durations() twice\n     */\n    int update_initial_durations_done;\n\n    /**\n     * Internal data to generate dts from pts\n     */\n    int64_t pts_reorder_error[MAX_REORDER_DELAY+1];\n    uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1];\n\n    /**\n     * Internal data to 
analyze DTS and detect faulty mpeg streams\n     */\n    int64_t last_dts_for_order_check;\n    uint8_t dts_ordered;\n    uint8_t dts_misordered;\n\n} AVStream;\n\nAVRational av_stream_get_r_frame_rate(const AVStream *s);\nvoid       av_stream_set_r_frame_rate(AVStream *s, AVRational r);\n\n#define AV_PROGRAM_RUNNING 1\n\n/**\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVProgram) must not be used outside libav*.\n */\ntypedef struct AVProgram {\n    int            id;\n    int            flags;\n    enum AVDiscard discard;        ///< selects which program to discard and which to feed to the caller\n    unsigned int   *stream_index;\n    unsigned int   nb_stream_indexes;\n    AVDictionary *metadata;\n\n    int program_num;\n    int pmt_pid;\n    int pcr_pid;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    int64_t start_time;\n    int64_t end_time;\n\n    int64_t pts_wrap_reference;    ///< reference dts for wrap detection\n    int pts_wrap_behavior;         ///< behavior on wrap detection\n} AVProgram;\n\n#define AVFMTCTX_NOHEADER      0x0001 /**< signal that no header is present\n                                         (streams are added dynamically) */\n\ntypedef struct AVChapter {\n    int id;                 ///< unique ID to identify the chapter\n    AVRational time_base;   ///< time base in which the start/end timestamps are specified\n    int64_t start, end;     ///< chapter start/end time in time_base units\n    AVDictionary *metadata;\n} AVChapter;\n\n\n/**\n * Callback used by devices to communicate with application.\n */\ntypedef int (*av_format_control_message)(struct AVFormatContext *s, int type,\n                                         void *data, size_t data_size);\n\n\n/**\n * The duration of a video can be estimated through various ways, and this enum can be used\n * to know how the duration was estimated.\n */\nenum AVDurationEstimationMethod {\n    AVFMT_DURATION_FROM_PTS,    ///< Duration accurately estimated from PTSes\n    AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration\n    AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate)\n};\n\ntypedef struct AVFormatInternal AVFormatInternal;\n\n/**\n * Format I/O context.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVFormatContext) must not be used outside libav*, use\n * avformat_alloc_context() to create an AVFormatContext.\n */\ntypedef struct AVFormatContext {\n    /**\n     * A class for logging and @ref avoptions. 
Set by avformat_alloc_context().\n     * Exports (de)muxer private options if they exist.\n     */\n    const AVClass *av_class;\n\n    /**\n     * The input container format.\n     *\n     * Demuxing only, set by avformat_open_input().\n     */\n    struct AVInputFormat *iformat;\n\n    /**\n     * The output container format.\n     *\n     * Muxing only, must be set by the caller before avformat_write_header().\n     */\n    struct AVOutputFormat *oformat;\n\n    /**\n     * Format private data. This is an AVOptions-enabled struct\n     * if and only if iformat/oformat.priv_class is not NULL.\n     *\n     * - muxing: set by avformat_write_header()\n     * - demuxing: set by avformat_open_input()\n     */\n    void *priv_data;\n\n    /**\n     * I/O context.\n     *\n     * - demuxing: either set by the user before avformat_open_input() (then\n     *             the user must close it manually) or set by avformat_open_input().\n     * - muxing: set by the user before avformat_write_header(). The caller must\n     *           take care of closing / freeing the IO context.\n     *\n     * Do NOT set this field if AVFMT_NOFILE flag is set in\n     * iformat/oformat.flags. In such a case, the (de)muxer will handle\n     * I/O in some other way and this field will be NULL.\n     */\n    AVIOContext *pb;\n\n    /* stream info */\n    int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */\n\n    /**\n     * Number of elements in AVFormatContext.streams.\n     *\n     * Set by avformat_new_stream(), must not be modified by any other code.\n     */\n    unsigned int nb_streams;\n    /**\n     * A list of all streams in the file. New streams are created with\n     * avformat_new_stream().\n     *\n     * - demuxing: streams are created by libavformat in avformat_open_input().\n     *             If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also\n     *             appear in av_read_frame().\n     * - muxing: streams are created by the user before avformat_write_header().\n     *\n     * Freed by libavformat in avformat_free_context().\n     */\n    AVStream **streams;\n\n    /**\n     * input or output filename\n     *\n     * - demuxing: set by avformat_open_input()\n     * - muxing: may be set by the caller before avformat_write_header()\n     */\n    char filename[1024];\n\n    /**\n     * Position of the first frame of the component, in\n     * AV_TIME_BASE fractional seconds. NEVER set this value directly:\n     * It is deduced from the AVStream values.\n     *\n     * Demuxing only, set by libavformat.\n     */\n    int64_t start_time;\n\n    /**\n     * Duration of the stream, in AV_TIME_BASE fractional\n     * seconds. Only set this value if you know none of the individual stream\n     * durations and also do not set any of them. This is deduced from the\n     * AVStream values if not set.\n     *\n     * Demuxing only, set by libavformat.\n     */\n    int64_t duration;\n\n    /**\n     * Total stream bitrate in bit/s, 0 if not\n     * available. 
Never set it directly if the file_size and the\n     * duration are known as FFmpeg can compute it automatically.\n     */\n    int bit_rate;\n\n    unsigned int packet_size;\n    int max_delay;\n\n    int flags;\n#define AVFMT_FLAG_GENPTS       0x0001 ///< Generate missing pts even if it requires parsing future frames.\n#define AVFMT_FLAG_IGNIDX       0x0002 ///< Ignore index.\n#define AVFMT_FLAG_NONBLOCK     0x0004 ///< Do not block when reading packets from input.\n#define AVFMT_FLAG_IGNDTS       0x0008 ///< Ignore DTS on frames that contain both DTS & PTS\n#define AVFMT_FLAG_NOFILLIN     0x0010 ///< Do not infer any values from other values, just return what is stored in the container\n#define AVFMT_FLAG_NOPARSE      0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled\n#define AVFMT_FLAG_NOBUFFER     0x0040 ///< Do not buffer frames when possible\n#define AVFMT_FLAG_CUSTOM_IO    0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it.\n#define AVFMT_FLAG_DISCARD_CORRUPT  0x0100 ///< Discard frames marked corrupted\n#define AVFMT_FLAG_FLUSH_PACKETS    0x0200 ///< Flush the AVIOContext every packet.\n#define AVFMT_FLAG_MP4A_LATM    0x8000 ///< Enable RTP MP4A-LATM payload\n#define AVFMT_FLAG_SORT_DTS    0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)\n#define AVFMT_FLAG_PRIV_OPT    0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)\n#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate.\n\n    /**\n     * Maximum size of the data read from input for determining\n     * the input container format.\n     * Demuxing only, set by the caller before avformat_open_input().\n     */\n    unsigned int probesize;\n\n    /**\n     * Maximum duration (in AV_TIME_BASE units) of the data read\n     * from input in avformat_find_stream_info().\n     * Demuxing only, set by the caller before avformat_find_stream_info().\n     */\n    int max_analyze_duration;\n\n    const uint8_t *key;\n    int keylen;\n\n    unsigned int nb_programs;\n    AVProgram **programs;\n\n    /**\n     * Forced video codec_id.\n     * Demuxing: Set by user.\n     */\n    enum AVCodecID video_codec_id;\n\n    /**\n     * Forced audio codec_id.\n     * Demuxing: Set by user.\n     */\n    enum AVCodecID audio_codec_id;\n\n    /**\n     * Forced subtitle codec_id.\n     * Demuxing: Set by user.\n     */\n    enum AVCodecID subtitle_codec_id;\n\n    /**\n     * Maximum amount of memory in bytes to use for the index of each stream.\n     * If the index exceeds this size, entries will be discarded as\n     * needed to maintain a smaller size. 
This can lead to slower or less\n     * accurate seeking (depends on demuxer).\n     * Demuxers for which a full in-memory index is mandatory will ignore\n     * this.\n     * - muxing: unused\n     * - demuxing: set by user\n     */\n    unsigned int max_index_size;\n\n    /**\n     * Maximum amount of memory in bytes to use for buffering frames\n     * obtained from realtime capture devices.\n     */\n    unsigned int max_picture_buffer;\n\n    /**\n     * Number of chapters in AVChapter array.\n     * When muxing, chapters are normally written in the file header,\n     * so nb_chapters should normally be initialized before write_header\n     * is called. Some muxers (e.g. mov and mkv) can also write chapters\n     * in the trailer.  To write chapters in the trailer, nb_chapters\n     * must be zero when write_header is called and non-zero when\n     * write_trailer is called.\n     * - muxing: set by user\n     * - demuxing: set by libavformat\n     */\n    unsigned int nb_chapters;\n    AVChapter **chapters;\n\n    /**\n     * Metadata that applies to the whole file.\n     *\n     * - demuxing: set by libavformat in avformat_open_input()\n     * - muxing: may be set by the caller before avformat_write_header()\n     *\n     * Freed by libavformat in avformat_free_context().\n     */\n    AVDictionary *metadata;\n\n    /**\n     * Start time of the stream in real world time, in microseconds\n     * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the\n     * stream was captured at this real world time.\n     * Muxing only, set by the caller before avformat_write_header().\n     */\n    int64_t start_time_realtime;\n\n    /**\n     * The number of frames used for determining the framerate in\n     * avformat_find_stream_info().\n     * Demuxing only, set by the caller before avformat_find_stream_info().\n     */\n    int fps_probe_size;\n\n    /**\n     * Error recognition; higher values will detect more errors but may\n     * misdetect some more or less valid parts as errors.\n     * Demuxing only, set by the caller before avformat_open_input().\n     */\n    int error_recognition;\n\n    /**\n     * Custom interrupt callbacks for the I/O layer.\n     *\n     * demuxing: set by the user before avformat_open_input().\n     * muxing: set by the user before avformat_write_header()\n     * (mainly useful for AVFMT_NOFILE formats). The callback\n     * should also be passed to avio_open2() if it's used to\n     * open the file.\n     */\n    AVIOInterruptCB interrupt_callback;\n\n    /**\n     * Flags to enable debugging.\n     */\n    int debug;\n#define FF_FDEBUG_TS        0x0001\n\n    /**\n     * Maximum buffering duration for interleaving.\n     *\n     * To ensure all the streams are interleaved correctly,\n     * av_interleaved_write_frame() will wait until it has at least one packet\n     * for each stream before actually writing any packets to the output file.\n     * When some streams are \"sparse\" (i.e. 
there are large gaps between\n     * successive packets), this can result in excessive buffering.\n     *\n     * This field specifies the maximum difference between the timestamps of the\n     * first and the last packet in the muxing queue, above which libavformat\n     * will output a packet regardless of whether it has queued a packet for all\n     * the streams.\n     *\n     * Muxing only, set by the caller before avformat_write_header().\n     */\n    int64_t max_interleave_delta;\n\n    /**\n     * Transport stream id.\n     * This will be moved into demuxer private options. Thus no API/ABI compatibility is guaranteed.\n     */\n    int ts_id;\n\n    /**\n     * Audio preload in microseconds.\n     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int audio_preload;\n\n    /**\n     * Max chunk time in microseconds.\n     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int max_chunk_duration;\n\n    /**\n     * Max chunk size in bytes.\n     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int max_chunk_size;\n\n    /**\n     * forces the use of wallclock timestamps as pts/dts of packets.\n     * This has undefined results in the presence of B frames.\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    int use_wallclock_as_timestamps;\n\n    /**\n     * Avoid negative timestamps during muxing.\n     *  0 -> allow negative timestamps\n     *  1 -> avoid negative timestamps\n     * -1 -> choose automatically (default)\n     * Note, this only works when interleave_packet_per_dts is in use.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int avoid_negative_ts;\n\n    /**\n     * avio flags, used to force AVIO_FLAG_DIRECT.\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    int avio_flags;\n\n    /**\n     * The duration field can be estimated in various ways, and this field can be used\n     * to know how the duration was estimated.\n     * - encoding: unused\n     * - decoding: Read by user via AVOptions (NO direct access)\n     */\n    enum AVDurationEstimationMethod duration_estimation_method;\n\n    /**\n     * Skip initial bytes when opening stream\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    unsigned int skip_initial_bytes;\n\n    /**\n     * Correct single timestamp overflows\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    unsigned int correct_ts_overflow;\n\n    /**\n     * Force seeking to any (also non-key) frames.\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    int seek2any;\n\n    /**\n     * Flush the I/O context after each packet.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int flush_packets;\n\n    /**\n     * format probing score.\n     * The maximal score is AVPROBE_SCORE_MAX; it is set when the demuxer 
probes\n * the format.\n * - encoding: unused\n * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access)\n */\n    int probe_score;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n\n    /**\n     * This buffer is only needed when packets were already buffered but\n     * not decoded, for example to get the codec parameters in MPEG\n     * streams.\n     */\n    struct AVPacketList *packet_buffer;\n    struct AVPacketList *packet_buffer_end;\n\n    /* av_seek_frame() support */\n    int64_t data_offset; /**< offset of the first packet */\n\n    /**\n     * Raw packets from the demuxer, prior to parsing and decoding.\n     * This buffer is used for buffering packets until the codec can\n     * be identified, as parsing cannot be done without knowing the\n     * codec.\n     */\n    struct AVPacketList *raw_packet_buffer;\n    struct AVPacketList *raw_packet_buffer_end;\n    /**\n     * Packets split by the parser get queued here.\n     */\n    struct AVPacketList *parse_queue;\n    struct AVPacketList *parse_queue_end;\n    /**\n     * Remaining size available for raw_packet_buffer, in bytes.\n     */\n#define RAW_PACKET_BUFFER_SIZE 2500000\n    int raw_packet_buffer_remaining_size;\n\n    /**\n     * Offset to remap timestamps to be non-negative.\n     * Expressed in timebase units.\n     * @see AVStream.mux_ts_offset\n     */\n    int64_t offset;\n\n    /**\n     * Timebase for the timestamp offset.\n     */\n    AVRational offset_timebase;\n\n    /**\n     * An opaque field for libavformat internal usage.\n     * Must not be accessed in any way by callers.\n     */\n    AVFormatInternal *internal;\n\n    /**\n     * IO repositioned flag.\n     * This is set by avformat when the underlying IO context read pointer\n     * is repositioned, for example when doing byte-based seeking.\n     * Demuxers can use the flag to detect such changes.\n     */\n    int io_repositioned;\n\n    /**\n     * Forced video codec.\n     * This allows forcing a specific decoder, even when there are multiple with\n     * the same codec_id.\n     * Demuxing: Set by user via av_format_set_video_codec (NO direct access).\n     */\n    AVCodec *video_codec;\n\n    /**\n     * Forced audio codec.\n     * This allows forcing a specific decoder, even when there are multiple with\n     * the same codec_id.\n     * Demuxing: Set by user via av_format_set_audio_codec (NO direct access).\n     */\n    AVCodec *audio_codec;\n\n    /**\n     * Forced subtitle codec.\n     * This allows forcing a specific decoder, even when there are multiple with\n     * the same codec_id.\n     * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access).\n     */\n    AVCodec *subtitle_codec;\n\n    /**\n     * Number of bytes to be written as padding in a metadata header.\n     * Demuxing: Unused.\n     * Muxing: Set by user via av_format_set_metadata_header_padding.\n     */\n    int metadata_header_padding;\n\n    /**\n     * User data.\n     * This is a place for some private data of the user.\n     * Mostly usable with control_message_cb or any future callbacks in the device's context.\n     */\n    void *opaque;\n\n    /**\n     * Callback used 
by devices to communicate with application.\n     */\n    av_format_control_message control_message_cb;\n\n    /**\n     * Output timestamp offset, in microseconds.\n     * Muxing: set by user via AVOptions (NO direct access)\n     */\n    int64_t output_ts_offset;\n} AVFormatContext;\n\nint av_format_get_probe_score(const AVFormatContext *s);\nAVCodec * av_format_get_video_codec(const AVFormatContext *s);\nvoid      av_format_set_video_codec(AVFormatContext *s, AVCodec *c);\nAVCodec * av_format_get_audio_codec(const AVFormatContext *s);\nvoid      av_format_set_audio_codec(AVFormatContext *s, AVCodec *c);\nAVCodec * av_format_get_subtitle_codec(const AVFormatContext *s);\nvoid      av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c);\nint       av_format_get_metadata_header_padding(const AVFormatContext *s);\nvoid      av_format_set_metadata_header_padding(AVFormatContext *s, int c);\nvoid *    av_format_get_opaque(const AVFormatContext *s);\nvoid      av_format_set_opaque(AVFormatContext *s, void *opaque);\nav_format_control_message av_format_get_control_message_cb(const AVFormatContext *s);\nvoid      av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback);\n\n/**\n * Returns the method used to set ctx->duration.\n *\n * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE.\n */\nenum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx);\n\ntypedef struct AVPacketList {\n    AVPacket pkt;\n    struct AVPacketList *next;\n} AVPacketList;\n\n\n/**\n * @defgroup lavf_core Core functions\n * @ingroup libavf\n *\n * Functions for querying libavformat capabilities, allocating core structures,\n * etc.\n * @{\n */\n\n/**\n * Return the LIBAVFORMAT_VERSION_INT constant.\n */\nunsigned avformat_version(void);\n\n/**\n * Return the libavformat build-time configuration.\n */\nconst char *avformat_configuration(void);\n\n/**\n * Return the libavformat license.\n */\nconst char *avformat_license(void);\n\n/**\n * Initialize libavformat and register all the muxers, demuxers and\n * protocols. If you do not call this function, then you can select\n * exactly which formats you want to support.\n *\n * @see av_register_input_format()\n * @see av_register_output_format()\n */\nvoid av_register_all(void);\n\nvoid av_register_input_format(AVInputFormat *format);\nvoid av_register_output_format(AVOutputFormat *format);\n\n/**\n * Do global initialization of network components. 
This is optional,\n * but recommended, since it avoids the overhead of implicitly\n * doing the setup for each session.\n *\n * Calling this function will become mandatory if using network\n * protocols at some major version bump.\n */\nint avformat_network_init(void);\n\n/**\n * Undo the initialization done by avformat_network_init.\n */\nint avformat_network_deinit(void);\n\n/**\n * If f is NULL, returns the first registered input format;\n * if f is non-NULL, returns the next registered input format after f\n * or NULL if f is the last one.\n */\nAVInputFormat  *av_iformat_next(AVInputFormat  *f);\n\n/**\n * If f is NULL, returns the first registered output format;\n * if f is non-NULL, returns the next registered output format after f\n * or NULL if f is the last one.\n */\nAVOutputFormat *av_oformat_next(AVOutputFormat *f);\n\n/**\n * Allocate an AVFormatContext.\n * avformat_free_context() can be used to free the context and everything\n * allocated by the framework within it.\n */\nAVFormatContext *avformat_alloc_context(void);\n\n/**\n * Free an AVFormatContext and all its streams.\n * @param s context to free\n */\nvoid avformat_free_context(AVFormatContext *s);\n\n/**\n * Get the AVClass for AVFormatContext. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *avformat_get_class(void);\n\n/**\n * Add a new stream to a media file.\n *\n * When demuxing, it is called by the demuxer in read_header(). If the\n * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also\n * be called in read_packet().\n *\n * When muxing, it should be called by the user before avformat_write_header().\n *\n * The user is required to call avcodec_close() and avformat_free_context() to\n * clean up the allocation by avformat_new_stream().\n *\n * @param s media file handle\n * @param c If non-NULL, the AVCodecContext corresponding to the new stream\n * will be initialized to use this codec. This is needed for e.g. 
codec-specific\n * defaults to be set, so codec should be provided if it is known.\n *\n * @return newly created stream or NULL on error.\n */\nAVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);\n\nAVProgram *av_new_program(AVFormatContext *s, int id);\n\n/**\n * @}\n */\n\n\n#if FF_API_ALLOC_OUTPUT_CONTEXT\n/**\n * @deprecated deprecated in favor of avformat_alloc_output_context2()\n */\nattribute_deprecated\nAVFormatContext *avformat_alloc_output_context(const char *format,\n                                               AVOutputFormat *oformat,\n                                               const char *filename);\n#endif\n\n/**\n * Allocate an AVFormatContext for an output format.\n * avformat_free_context() can be used to free the context and\n * everything allocated by the framework within it.\n *\n * @param *ctx is set to the created format context, or to NULL in\n * case of failure\n * @param oformat format to use for allocating the context, if NULL\n * format_name and filename are used instead\n * @param format_name the name of the output format to use for allocating the\n * context, if NULL filename is used instead\n * @param filename the name of the file to use for allocating the\n * context, may be NULL\n * @return >= 0 in case of success, a negative AVERROR code in case of\n * failure\n */\nint avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,\n                                   const char *format_name, const char *filename);\n\n/**\n * @addtogroup lavf_decoding\n * @{\n */\n\n/**\n * Find AVInputFormat based on the short name of the input format.\n */\nAVInputFormat *av_find_input_format(const char *short_name);\n\n/**\n * Guess the file format.\n *\n * @param pd        data to be probed\n * @param is_opened Whether the file is already opened; determines whether\n *                  demuxers with or without AVFMT_NOFILE are probed.\n */\nAVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);\n\n/**\n * Guess the file format.\n *\n * @param pd        data to be probed\n * @param is_opened Whether the file is already opened; determines whether\n *                  demuxers with or without AVFMT_NOFILE are probed.\n * @param score_max A probe score larger than this is required to accept a\n *                  detection; the variable is set to the actual detection\n *                  score afterwards.\n *                  If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended\n *                  to retry with a larger probe buffer.\n */\nAVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max);\n\n/**\n * Guess the file format.\n *\n * @param is_opened Whether the file is already opened; determines whether\n *                  demuxers with or without AVFMT_NOFILE are probed.\n * @param score_ret The score of the best detection.\n */\nAVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);\n\n/**\n * Probe a bytestream to determine the input format. Each time a probe returns\n * with a score that is too low, the probe buffer size is increased and another\n * attempt is made. 
When the maximum probe size is reached, the input format\n * with the highest score is returned.\n *\n * @param pb the bytestream to probe\n * @param fmt the input format is put here\n * @param filename the filename of the stream\n * @param logctx the log context\n * @param offset the offset within the bytestream to probe from\n * @param max_probe_size the maximum probe buffer size (zero for default)\n * @return the score in case of success (the maximal score is AVPROBE_SCORE_MAX),\n *         a negative AVERROR code otherwise\n */\nint av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,\n                           const char *filename, void *logctx,\n                           unsigned int offset, unsigned int max_probe_size);\n\n/**\n * Like av_probe_input_buffer2() but returns 0 on success\n */\nint av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,\n                          const char *filename, void *logctx,\n                          unsigned int offset, unsigned int max_probe_size);\n\n/**\n * Open an input stream and read the header. The codecs are not opened.\n * The stream must be closed with avformat_close_input().\n *\n * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).\n *           May be a pointer to NULL, in which case an AVFormatContext is allocated by this\n *           function and written into ps.\n *           Note that a user-supplied AVFormatContext will be freed on failure.\n * @param filename Name of the stream to open.\n * @param fmt If non-NULL, this parameter forces a specific input format.\n *            Otherwise the format is autodetected.\n * @param options  A dictionary filled with AVFormatContext and demuxer-private options.\n *                 On return this parameter will be destroyed and replaced with a dict containing\n *                 options that were not found. May be NULL.\n *\n * @return 0 on success, a negative AVERROR on failure.\n *\n * @note If you want to use custom IO, preallocate the format context and set its pb field.\n */\nint avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);\n\nattribute_deprecated\nint av_demuxer_open(AVFormatContext *ic);\n\n#if FF_API_FORMAT_PARAMETERS\n/**\n * Read packets of a media file to get stream information. This\n * is useful for file formats with no headers such as MPEG. This\n * function also computes the real framerate in case of MPEG-2 repeat\n * frame mode.\n * The logical file position is not changed by this function;\n * examined packets may be buffered for later processing.\n *\n * @param ic media file handle\n * @return >=0 if OK, AVERROR_xxx on error\n * @todo Let the user decide somehow what information is needed so that\n *       we do not waste time getting stuff the user does not need.\n *\n * @deprecated use avformat_find_stream_info.\n */\nattribute_deprecated\nint av_find_stream_info(AVFormatContext *ic);\n#endif\n\n/**\n * Read packets of a media file to get stream information. This\n * is useful for file formats with no headers such as MPEG. 
This\n * function also computes the real framerate in case of MPEG-2 repeat\n * frame mode.\n * The logical file position is not changed by this function;\n * examined packets may be buffered for later processing.\n *\n * @param ic media file handle\n * @param options  If non-NULL, an ic.nb_streams long array of pointers to\n *                 dictionaries, where i-th member contains options for\n *                 codec corresponding to i-th stream.\n *                 On return each dictionary will be filled with options that were not found.\n * @return >=0 if OK, AVERROR_xxx on error\n *\n * @note this function isn't guaranteed to open all the codecs, so\n *       options being non-empty at return is a perfectly normal behavior.\n *\n * @todo Let the user decide somehow what information is needed so that\n *       we do not waste time getting stuff the user does not need.\n */\nint avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);\n\n/**\n * Find the programs which belong to a given stream.\n *\n * @param ic    media file handle\n * @param last  the last found program, the search will start after this\n *              program, or from the beginning if it is NULL\n * @param s     stream index\n * @return the next program which belongs to s, NULL if no program is found or\n *         the last program is not among the programs of ic.\n */\nAVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s);\n\n/**\n * Find the \"best\" stream in the file.\n * The best stream is determined according to various heuristics as the most\n * likely to be what the user expects.\n * If the decoder parameter is non-NULL, av_find_best_stream will find the\n * default decoder for the stream's codec; streams for which no decoder can\n * be found are ignored.\n *\n * @param ic                media file handle\n * @param type              stream type: video, audio, subtitles, etc.\n * @param wanted_stream_nb  user-requested stream number,\n *                          or -1 for automatic selection\n * @param related_stream    try to find a stream related (eg. 
in the same\n *                          program) to this one, or -1 if none\n * @param decoder_ret       if non-NULL, returns the decoder for the\n *                          selected stream\n * @param flags             flags; none are currently defined\n * @return  the non-negative stream number in case of success,\n *          AVERROR_STREAM_NOT_FOUND if no stream with the requested type\n *          could be found,\n *          AVERROR_DECODER_NOT_FOUND if streams were found but no decoder\n * @note  If av_find_best_stream returns successfully and decoder_ret is not\n *        NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec.\n */\nint av_find_best_stream(AVFormatContext *ic,\n                        enum AVMediaType type,\n                        int wanted_stream_nb,\n                        int related_stream,\n                        AVCodec **decoder_ret,\n                        int flags);\n\n#if FF_API_READ_PACKET\n/**\n * @deprecated use AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE to read raw\n * unprocessed packets\n *\n * Read a transport packet from a media file.\n *\n * This function is obsolete and should never be used.\n * Use av_read_frame() instead.\n *\n * @param s media file handle\n * @param pkt is filled\n * @return 0 if OK, AVERROR_xxx on error\n */\nattribute_deprecated\nint av_read_packet(AVFormatContext *s, AVPacket *pkt);\n#endif\n\n/**\n * Return the next frame of a stream.\n * This function returns what is stored in the file, and does not validate\n * that what is there are valid frames for the decoder. It will split what is\n * stored in the file into frames and return one for each call. It will not\n * omit invalid data between valid frames so as to give the decoder the maximum\n * information possible for decoding.\n *\n * If pkt->buf is NULL, then the packet is valid until the next\n * av_read_frame() or until avformat_close_input(). Otherwise the packet\n * is valid indefinitely. In both cases the packet must be freed with\n * av_free_packet when it is no longer needed. For video, the packet contains\n * exactly one frame. For audio, it contains an integer number of frames if each\n * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames\n * have a variable size (e.g. MPEG audio), then it contains one frame.\n *\n * pkt->pts, pkt->dts and pkt->duration are always set to correct\n * values in AVStream.time_base units (and guessed if the format cannot\n * provide them). 
pkt->pts can be AV_NOPTS_VALUE if the video format\n * has B-frames, so it is better to rely on pkt->dts if you do not\n * decompress the payload.\n *\n * @return 0 if OK, < 0 on error or end of file\n */\nint av_read_frame(AVFormatContext *s, AVPacket *pkt);\n\n/**\n * Seek to the keyframe at timestamp.\n * 'timestamp' in 'stream_index'.\n *\n * @param s media file handle\n * @param stream_index If stream_index is (-1), a default\n * stream is selected, and timestamp is automatically converted\n * from AV_TIME_BASE units to the stream specific time_base.\n * @param timestamp Timestamp in AVStream.time_base units\n *        or, if no stream is specified, in AV_TIME_BASE units.\n * @param flags flags which select direction and seeking mode\n * @return >= 0 on success\n */\nint av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp,\n                  int flags);\n\n/**\n * Seek to timestamp ts.\n * Seeking will be done so that the point from which all active streams\n * can be presented successfully will be closest to ts and within min/max_ts.\n * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.\n *\n * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and\n * are the file position (this may not be supported by all demuxers).\n * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames\n * in the stream with stream_index (this may not be supported by all demuxers).\n * Otherwise all timestamps are in units of the stream selected by stream_index\n * or if stream_index is -1, in AV_TIME_BASE units.\n * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as\n * keyframes (this may not be supported by all demuxers).\n * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored.\n *\n * @param s media file handle\n * @param stream_index index of the stream which is used as time base reference\n * @param min_ts smallest acceptable timestamp\n * @param ts target timestamp\n * @param max_ts largest acceptable timestamp\n * @param flags flags\n * @return >=0 on success, error code otherwise\n *\n * @note This is part of the new seek API which is still under construction.\n *       Thus do not use this yet. It may change at any time, do not expect\n *       ABI compatibility yet!\n */\nint avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);\n\n/**\n * Start playing a network-based stream (e.g. RTSP stream) at the\n * current position.\n */\nint av_read_play(AVFormatContext *s);\n\n/**\n * Pause a network-based stream (e.g. RTSP stream).\n *\n * Use av_read_play() to resume it.\n */\nint av_read_pause(AVFormatContext *s);\n\n#if FF_API_CLOSE_INPUT_FILE\n/**\n * @deprecated use avformat_close_input()\n * Close a media file (but not its codecs).\n *\n * @param s media file handle\n */\nattribute_deprecated\nvoid av_close_input_file(AVFormatContext *s);\n#endif\n\n/**\n * Close an opened input AVFormatContext. Free it and all its contents\n * and set *s to NULL.\n */\nvoid avformat_close_input(AVFormatContext **s);\n/**\n * @}\n */\n\n#if FF_API_NEW_STREAM\n/**\n * Add a new stream to a media file.\n *\n * Can only be called in the read_header() function. 
If the flag\n * AVFMTCTX_NOHEADER is in the format context, then new streams\n * can be added in read_packet too.\n *\n * @param s media file handle\n * @param id file-format-dependent stream ID\n */\nattribute_deprecated\nAVStream *av_new_stream(AVFormatContext *s, int id);\n#endif\n\n#if FF_API_SET_PTS_INFO\n/**\n * @deprecated this function is not supposed to be called outside of lavf\n */\nattribute_deprecated\nvoid av_set_pts_info(AVStream *s, int pts_wrap_bits,\n                     unsigned int pts_num, unsigned int pts_den);\n#endif\n\n#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward\n#define AVSEEK_FLAG_BYTE     2 ///< seeking based on position in bytes\n#define AVSEEK_FLAG_ANY      4 ///< seek to any frame, even non-keyframes\n#define AVSEEK_FLAG_FRAME    8 ///< seeking based on frame number\n\n/**\n * @addtogroup lavf_encoding\n * @{\n */\n/**\n * Allocate the stream private data and write the stream header to\n * an output media file.\n *\n * @param s Media file handle, must be allocated with avformat_alloc_context().\n *          Its oformat field must be set to the desired output format;\n *          Its pb field must be set to an already opened AVIOContext.\n * @param options  An AVDictionary filled with AVFormatContext and muxer-private options.\n *                 On return this parameter will be destroyed and replaced with a dict containing\n *                 options that were not found. May be NULL.\n *\n * @return 0 on success, negative AVERROR on failure.\n *\n * @see av_opt_find, av_dict_set, avio_open, av_oformat_next.\n */\nint avformat_write_header(AVFormatContext *s, AVDictionary **options);\n\n/**\n * Write a packet to an output media file.\n *\n * This function passes the packet directly to the muxer, without any buffering\n * or reordering. The caller is responsible for correctly interleaving the\n * packets if the format requires it. Callers that want libavformat to handle\n * the interleaving should call av_interleaved_write_frame() instead of this\n * function.\n *\n * @param s media file handle\n * @param pkt The packet containing the data to be written. Note that unlike\n *            av_interleaved_write_frame(), this function does not take\n *            ownership of the packet passed to it (though some muxers may make\n *            an internal reference to the input packet).\n *            <br>\n *            This parameter can be NULL (at any time, not just at the end), in\n *            order to immediately flush data buffered within the muxer, for\n *            muxers that buffer up data internally before writing it to the\n *            output.\n *            <br>\n *            Packet's @ref AVPacket.stream_index \"stream_index\" field must be\n *            set to the index of the corresponding stream in @ref\n *            AVFormatContext.streams \"s->streams\". It is very strongly\n *            recommended that timing information (@ref AVPacket.pts \"pts\", @ref\n *            AVPacket.dts \"dts\", @ref AVPacket.duration \"duration\") is set to\n *            correct values.\n * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush\n *\n * @see av_interleaved_write_frame()\n */\nint av_write_frame(AVFormatContext *s, AVPacket *pkt);\n\n/**\n * Write a packet to an output media file ensuring correct interleaving.\n *\n * This function will buffer the packets internally as needed to make sure the\n * packets in the output file are properly interleaved in the order of\n * increasing dts. 
Callers doing their own interleaving should call\n * av_write_frame() instead of this function.\n *\n * @param s media file handle\n * @param pkt The packet containing the data to be written.\n *            <br>\n *            If the packet is reference-counted, this function will take\n *            ownership of this reference and unreference it later when it sees\n *            fit.\n *            The caller must not access the data through this reference after\n *            this function returns. If the packet is not reference-counted,\n *            libavformat will make a copy.\n *            <br>\n *            This parameter can be NULL (at any time, not just at the end), to\n *            flush the interleaving queues.\n *            <br>\n *            Packet's @ref AVPacket.stream_index \"stream_index\" field must be\n *            set to the index of the corresponding stream in @ref\n *            AVFormatContext.streams \"s->streams\". It is very strongly\n *            recommended that timing information (@ref AVPacket.pts \"pts\", @ref\n *            AVPacket.dts \"dts\", @ref AVPacket.duration \"duration\") is set to\n *            correct values.\n *\n * @return 0 on success, a negative AVERROR on error. Libavformat will always\n *         take care of freeing the packet, even if this function fails.\n *\n * @see av_write_frame(), AVFormatContext.max_interleave_delta\n */\nint av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);\n\n/**\n * Write an uncoded frame to an output media file.\n *\n * The frame must be correctly interleaved according to the container\n * specification; if not, then av_interleaved_write_frame() must be used.\n *\n * See av_interleaved_write_frame() for details.\n */\nint av_write_uncoded_frame(AVFormatContext *s, int stream_index,\n                           AVFrame *frame);\n\n/**\n * Write an uncoded frame to an output media file.\n *\n * If the muxer supports it, this function allows writing an AVFrame\n * structure directly, without encoding it into a packet.\n * It is mostly useful for devices and similar special muxers that use raw\n * video or PCM data and will not serialize it into a byte stream.\n *\n * To test whether it is possible to use it with a given muxer and stream,\n * use av_write_uncoded_frame_query().\n *\n * The caller gives up ownership of the frame and must not access it\n * afterwards.\n *\n * @return  >=0 for success, a negative code on error\n */\nint av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,\n                                       AVFrame *frame);\n\n/**\n * Test whether a muxer supports uncoded frames.\n *\n * @return  >=0 if an uncoded frame can be written to that muxer and stream,\n *          <0 if not\n */\nint av_write_uncoded_frame_query(AVFormatContext *s, int stream_index);\n\n/**\n * Write the stream trailer to an output media file and free the\n * file private data.\n *\n * May only be called after a successful call to avformat_write_header.\n *\n * @param s media file handle\n * @return 0 if OK, AVERROR_xxx on error\n */\nint av_write_trailer(AVFormatContext *s);\n\n/**\n * Return the output format in the list of registered output formats\n * which best matches the provided parameters, or return NULL if\n * there is no match.\n *\n * @param short_name if non-NULL checks if short_name matches with the\n * names of the registered formats\n * @param filename if non-NULL checks if filename terminates with the\n * extensions of the registered formats\n * @param mime_type if 
non-NULL checks if mime_type matches with the\n * MIME type of the registered formats\n */\nAVOutputFormat *av_guess_format(const char *short_name,\n                                const char *filename,\n                                const char *mime_type);\n\n/**\n * Guess the codec ID based upon muxer and filename.\n */\nenum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,\n                            const char *filename, const char *mime_type,\n                            enum AVMediaType type);\n\n/**\n * Get timing information for the data currently output.\n * The exact meaning of \"currently output\" depends on the format.\n * It is mostly relevant for devices that have an internal buffer and/or\n * work in real time.\n * @param s          media file handle\n * @param stream     stream in the media file\n * @param[out] dts   DTS of the last packet output for the stream, in stream\n *                   time_base units\n * @param[out] wall  absolute time when that packet was output,\n *                   in microseconds\n * @return  0 if OK, AVERROR(ENOSYS) if the format does not support it\n * Note: some formats or devices may not allow measuring dts and wall\n * atomically.\n */\nint av_get_output_timestamp(struct AVFormatContext *s, int stream,\n                            int64_t *dts, int64_t *wall);\n\n\n/**\n * @}\n */\n\n\n/**\n * @defgroup lavf_misc Utility functions\n * @ingroup libavf\n * @{\n *\n * Miscellaneous utility functions related to both muxing and demuxing\n * (or neither).\n */\n\n/**\n * Send a nice hexadecimal dump of a buffer to the specified file stream.\n *\n * @param f The file stream pointer where the dump should be sent to.\n * @param buf buffer\n * @param size buffer size\n *\n * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2\n */\nvoid av_hex_dump(FILE *f, const uint8_t *buf, int size);\n\n/**\n * Send a nice hexadecimal dump of a buffer to the log.\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n * pointer to an AVClass struct.\n * @param level The importance level of the message, lower values signifying\n * higher importance.\n * @param buf buffer\n * @param size buffer size\n *\n * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2\n */\nvoid av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size);\n\n/**\n * Send a nice dump of a packet to the specified file stream.\n *\n * @param f The file stream pointer where the dump should be sent to.\n * @param pkt packet to dump\n * @param dump_payload True if the payload must be displayed, too.\n * @param st AVStream that the packet belongs to\n */\nvoid av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st);\n\n\n/**\n * Send a nice dump of a packet to the log.\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n * pointer to an AVClass struct.\n * @param level The importance level of the message, lower values signifying\n * higher importance.\n * @param pkt packet to dump\n * @param dump_payload True if the payload must be displayed, too.\n * @param st AVStream that the packet belongs to\n */\nvoid av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,\n                      AVStream *st);\n\n/**\n * Get the AVCodecID for the given codec tag tag.\n * If no codec id is found returns AV_CODEC_ID_NONE.\n *\n * @param tags list of supported codec_id-codec_tag pairs, as stored\n * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag\n * @param tag  codec tag to match 
to a codec ID\n */\nenum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag);\n\n/**\n * Get the codec tag for the given codec id id.\n * If no codec tag is found returns 0.\n *\n * @param tags list of supported codec_id-codec_tag pairs, as stored\n * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag\n * @param id   codec ID to match to a codec tag\n */\nunsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id);\n\n/**\n * Get the codec tag for the given codec id.\n *\n * @param tags list of supported codec_id - codec_tag pairs, as stored\n * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag\n * @param id codec id that should be searched for in the list\n * @param tag A pointer to the found tag\n * @return 0 if id was not found in tags, > 0 if it was found\n */\nint av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id,\n                      unsigned int *tag);\n\nint av_find_default_stream_index(AVFormatContext *s);\n\n/**\n * Get the index for a specific timestamp.\n *\n * @param st        stream that the timestamp belongs to\n * @param timestamp timestamp to retrieve the index for\n * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond\n *                 to the timestamp which is <= the requested one, if backward\n *                 is 0, then it will be >=\n *              if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise\n * @return < 0 if no such timestamp could be found\n */\nint av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);\n\n/**\n * Add an index entry into a sorted list. Update the entry if the list\n * already contains it.\n *\n * @param timestamp timestamp in the time base of the given stream\n */\nint av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,\n                       int size, int distance, int flags);\n\n\n/**\n * Split a URL string into components.\n *\n * The pointers to buffers for storing individual components may be null,\n * in order to ignore that component. Buffers for components not found are\n * set to empty strings. 
If the port is not found, it is set to a negative\n * value.\n *\n * @param proto the buffer for the protocol\n * @param proto_size the size of the proto buffer\n * @param authorization the buffer for the authorization\n * @param authorization_size the size of the authorization buffer\n * @param hostname the buffer for the host name\n * @param hostname_size the size of the hostname buffer\n * @param port_ptr a pointer to store the port number in\n * @param path the buffer for the path\n * @param path_size the size of the path buffer\n * @param url the URL to split\n */\nvoid av_url_split(char *proto,         int proto_size,\n                  char *authorization, int authorization_size,\n                  char *hostname,      int hostname_size,\n                  int *port_ptr,\n                  char *path,          int path_size,\n                  const char *url);\n\n\nvoid av_dump_format(AVFormatContext *ic,\n                    int index,\n                    const char *url,\n                    int is_output);\n\n/**\n * Return in 'buf' the path with '%d' replaced by a number.\n *\n * Also handles the '%0nd' format where 'n' is the total number\n * of digits and '%%'.\n *\n * @param buf destination buffer\n * @param buf_size destination buffer size\n * @param path numbered sequence string\n * @param number frame number\n * @return 0 if OK, -1 on format error\n */\nint av_get_frame_filename(char *buf, int buf_size,\n                          const char *path, int number);\n\n/**\n * Check whether filename actually is a numbered sequence generator.\n *\n * @param filename possible numbered sequence string\n * @return 1 if a valid numbered sequence string, 0 otherwise\n */\nint av_filename_number_test(const char *filename);\n\n/**\n * Generate an SDP for an RTP session.\n *\n * Note, this overwrites the id values of AVStreams in the muxer contexts\n * for getting unique dynamic payload types.\n *\n * @param ac array of AVFormatContexts describing the RTP streams. If the\n *           array is composed by only one context, such context can contain\n *           multiple AVStreams (one AVStream per RTP stream). 
Otherwise,\n *           all the contexts in the array (an AVCodecContext per RTP stream)\n *           must contain only one AVStream.\n * @param n_files number of AVCodecContexts contained in ac\n * @param buf buffer where the SDP will be stored (must be allocated by\n *            the caller)\n * @param size the size of the buffer\n * @return 0 if OK, AVERROR_xxx on error\n */\nint av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size);\n\n/**\n * Return a positive value if the given filename has one of the given\n * extensions, 0 otherwise.\n *\n * @param filename   file name to check against the given extensions\n * @param extensions a comma-separated list of filename extensions\n */\nint av_match_ext(const char *filename, const char *extensions);\n\n/**\n * Test if the given container can store a codec.\n *\n * @param ofmt           container to check for compatibility\n * @param codec_id       codec to potentially store in container\n * @param std_compliance standards compliance level, one of FF_COMPLIANCE_*\n *\n * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot.\n *         A negative number if this information is not available.\n */\nint avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance);\n\n/**\n * @defgroup riff_fourcc RIFF FourCCs\n * @{\n * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are\n * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the\n * following code:\n * @code\n * uint32_t tag = MKTAG('H', '2', '6', '4');\n * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 };\n * enum AVCodecID id = av_codec_get_id(table, tag);\n * @endcode\n */\n/**\n * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_riff_video_tags(void);\n/**\n * @return the table mapping RIFF FourCCs for audio to AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_riff_audio_tags(void);\n/**\n * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_mov_video_tags(void);\n/**\n * @return the table mapping MOV FourCCs for audio to AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_mov_audio_tags(void);\n\n/**\n * @}\n */\n\n/**\n * Guess the sample aspect ratio of a frame, based on both the stream and the\n * frame aspect ratio.\n *\n * Since the frame aspect ratio is set by the codec but the stream aspect ratio\n * is set by the demuxer, these two may not be equal. This function tries to\n * return the value that you should use if you would like to display the frame.\n *\n * Basic logic is to use the stream aspect ratio if it is set to something sane\n * otherwise use the frame aspect ratio. 
This way a container setting, which is\n * usually easy to modify can override the coded value in the frames.\n *\n * @param format the format context which the stream is part of\n * @param stream the stream which the frame is part of\n * @param frame the frame with the aspect ratio to be determined\n * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea\n */\nAVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame);\n\n/**\n * Guess the frame rate, based on both the container and codec information.\n *\n * @param ctx the format context which the stream is part of\n * @param stream the stream which the frame is part of\n * @param frame the frame for which the frame rate should be determined, may be NULL\n * @return the guessed (valid) frame rate, 0/1 if no idea\n */\nAVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame);\n\n/**\n * Check if the stream st contained in s is matched by the stream specifier\n * spec.\n *\n * See the \"stream specifiers\" chapter in the documentation for the syntax\n * of spec.\n *\n * @return  >0 if st is matched by spec;\n *          0  if st is not matched by spec;\n *          AVERROR code if spec is invalid\n *\n * @note  A stream specifier can match several streams in the format.\n */\nint avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,\n                                    const char *spec);\n\nint avformat_queue_attached_pictures(AVFormatContext *s);\n\n\n/**\n * @}\n */\n\n#endif /* AVFORMAT_AVFORMAT_H */\n"
  },
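  {
    "path": "doc/examples/demux_sketch.c",
    "content": "/*\n * Illustrative sketch, not part of the FFmpeg distribution or the original\n * project: a minimal demuxing loop built from the avformat.h API documented\n * above -- av_register_all(), avformat_open_input(), avformat_find_stream_info(),\n * av_find_best_stream(), av_read_frame() and avformat_close_input(). The input\n * name \"input.mp4\" and this file's path are assumptions chosen for the example.\n */\n#include <stdio.h>\n#include <libavformat/avformat.h>\n\nint main(void)\n{\n    AVFormatContext *fmt = NULL; /* avformat_open_input() allocates it */\n    AVPacket pkt;\n    int video_stream;\n\n    av_register_all(); /* register all muxers, demuxers and protocols */\n\n    /* Open the input and read the header; codecs are not opened yet. */\n    if (avformat_open_input(&fmt, \"input.mp4\", NULL, NULL) < 0)\n        return 1;\n\n    /* Probe some packets so the stream parameters get filled in. */\n    if (avformat_find_stream_info(fmt, NULL) < 0) {\n        avformat_close_input(&fmt);\n        return 1;\n    }\n\n    /* Pick the \"best\" video stream using libavformat's heuristics. */\n    video_stream = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);\n\n    /* Pull demuxed packets until end of file. */\n    while (av_read_frame(fmt, &pkt) >= 0) {\n        if (pkt.stream_index == video_stream)\n            printf(\"video packet: pts=%lld size=%d\\n\",\n                   (long long)pkt.pts, pkt.size);\n        av_free_packet(&pkt); /* packets must be freed after use */\n    }\n\n    avformat_close_input(&fmt); /* frees the context and sets fmt to NULL */\n    return 0;\n}\n"
  },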
  {
    "path": "src/3rdparty/ffmpeg/include/libavformat/avio.h",
    "content": "/*\n * copyright (c) 2001 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n#ifndef AVFORMAT_AVIO_H\n#define AVFORMAT_AVIO_H\n\n/**\n * @file\n * @ingroup lavf_io\n * Buffered I/O operations\n */\n\n#include <stdint.h>\n\n#include \"libavutil/common.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/log.h\"\n\n#include \"libavformat/version.h\"\n\n\n#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */\n\n/**\n * Callback for checking whether to abort blocking functions.\n * AVERROR_EXIT is returned in this case by the interrupted\n * function. During blocking operations, callback is called with\n * opaque as parameter. If the callback returns 1, the\n * blocking operation will be aborted.\n *\n * No members can be added to this struct without a major bump, if\n * new elements have been added after this struct in AVFormatContext\n * or AVIOContext.\n */\ntypedef struct AVIOInterruptCB {\n    int (*callback)(void*);\n    void *opaque;\n} AVIOInterruptCB;\n\n/**\n * Bytestream IO Context.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVIOContext) must not be used outside libav*.\n *\n * @note None of the function pointers in AVIOContext should be called\n *       directly, they should only be set by the client application\n *       when implementing custom I/O. Normally these are set to the\n *       function pointers specified in avio_alloc_context()\n */\ntypedef struct AVIOContext {\n    /**\n     * A class for private options.\n     *\n     * If this AVIOContext is created by avio_open2(), av_class is set and\n     * passes the options down to protocols.\n     *\n     * If this AVIOContext is manually allocated, then av_class may be set by\n     * the caller.\n     *\n     * warning -- this field can be NULL, be sure to not pass this AVIOContext\n     * to any av_opt_* functions in that case.\n     */\n    const AVClass *av_class;\n    unsigned char *buffer;  /**< Start of the buffer. */\n    int buffer_size;        /**< Maximum buffer size */\n    unsigned char *buf_ptr; /**< Current position in the buffer */\n    unsigned char *buf_end; /**< End of the data, may be less than\n                                 buffer+buffer_size if the read function returned\n                                 less data than requested, e.g. for streams where\n                                 no more data has been received yet. */\n    void *opaque;           /**< A private pointer, passed to the read/write/seek/...\n                                 functions. 
*/\n    int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);\n    int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);\n    int64_t (*seek)(void *opaque, int64_t offset, int whence);\n    int64_t pos;            /**< position in the file of the current buffer */\n    int must_flush;         /**< true if the next seek should flush */\n    int eof_reached;        /**< true if eof reached */\n    int write_flag;         /**< true if open for writing */\n    int max_packet_size;\n    unsigned long checksum;\n    unsigned char *checksum_ptr;\n    unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);\n    int error;              /**< contains the error code or 0 if no error happened */\n    /**\n     * Pause or resume playback for network streaming protocols - e.g. MMS.\n     */\n    int (*read_pause)(void *opaque, int pause);\n    /**\n     * Seek to a given timestamp in stream with the specified stream_index.\n     * Needed for some network streaming protocols which don't support seeking\n     * to byte position.\n     */\n    int64_t (*read_seek)(void *opaque, int stream_index,\n                         int64_t timestamp, int flags);\n    /**\n     * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.\n     */\n    int seekable;\n\n    /**\n     * max filesize, used to limit allocations\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int64_t maxsize;\n\n    /**\n     * avio_read and avio_write should if possible be satisfied directly\n     * instead of going through a buffer, and avio_seek will always\n     * call the underlying seek function directly.\n     */\n    int direct;\n\n    /**\n     * Bytes read statistic\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int64_t bytes_read;\n\n    /**\n     * seek statistic\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int seek_count;\n\n    /**\n     * writeout statistic\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int writeout_count;\n} AVIOContext;\n\n/* unbuffered I/O */\n\n/**\n * Return the name of the protocol that will handle the passed URL.\n *\n * NULL is returned if no protocol could be found for the given URL.\n *\n * @return Name of the protocol or NULL.\n */\nconst char *avio_find_protocol_name(const char *url);\n\n/**\n * Return AVIO_FLAG_* access flags corresponding to the access permissions\n * of the resource in url, or a negative value corresponding to an\n * AVERROR code in case of failure. The returned access flags are\n * masked by the value in flags.\n *\n * @note This function is intrinsically unsafe, in the sense that the\n * checked resource may change its existence or permission status from\n * one call to another. Thus you should not trust the returned value,\n * unless you are sure that no other processes are accessing the\n * checked resource.\n */\nint avio_check(const char *url, int flags);\n\n/**\n * Allocate and initialize an AVIOContext for buffered I/O. 
It must be later\n * freed with av_free().\n *\n * @param buffer Memory block for input/output operations via AVIOContext.\n *        The buffer must be allocated with av_malloc() and friends.\n * @param buffer_size The buffer size is very important for performance.\n *        For protocols with fixed blocksize it should be set to this blocksize.\n *        For others a typical size is a cache page, e.g. 4kb.\n * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise.\n * @param opaque An opaque pointer to user-specific data.\n * @param read_packet  A function for refilling the buffer, may be NULL.\n * @param write_packet A function for writing the buffer contents, may be NULL.\n *        The function may not change the input buffer's content.\n * @param seek A function for seeking to specified byte position, may be NULL.\n *\n * @return Allocated AVIOContext or NULL on failure.\n */\nAVIOContext *avio_alloc_context(\n                  unsigned char *buffer,\n                  int buffer_size,\n                  int write_flag,\n                  void *opaque,\n                  int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),\n                  int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),\n                  int64_t (*seek)(void *opaque, int64_t offset, int whence));\n\nvoid avio_w8(AVIOContext *s, int b);\nvoid avio_write(AVIOContext *s, const unsigned char *buf, int size);\nvoid avio_wl64(AVIOContext *s, uint64_t val);\nvoid avio_wb64(AVIOContext *s, uint64_t val);\nvoid avio_wl32(AVIOContext *s, unsigned int val);\nvoid avio_wb32(AVIOContext *s, unsigned int val);\nvoid avio_wl24(AVIOContext *s, unsigned int val);\nvoid avio_wb24(AVIOContext *s, unsigned int val);\nvoid avio_wl16(AVIOContext *s, unsigned int val);\nvoid avio_wb16(AVIOContext *s, unsigned int val);\n\n/**\n * Write a NULL-terminated string.\n * @return number of bytes written.\n */\nint avio_put_str(AVIOContext *s, const char *str);\n\n/**\n * Convert a UTF-8 string to UTF-16LE and write it.\n * @return number of bytes written.\n */\nint avio_put_str16le(AVIOContext *s, const char *str);\n\n/**\n * Passing this as the \"whence\" parameter to a seek function causes it to\n * return the filesize without seeking anywhere. Supporting this is optional.\n * If it is not supported then the seek function will return <0.\n */\n#define AVSEEK_SIZE 0x10000\n\n/**\n * ORing this flag into the \"whence\" parameter to a seek function causes it to\n * seek by any means (like reopening and linear reading) or other normally unreasonable\n * means that can be extremely slow.\n * This may be ignored by the seek code.\n */\n#define AVSEEK_FORCE 0x20000\n\n/**\n * fseek() equivalent for AVIOContext.\n * @return new position or AVERROR.\n */\nint64_t avio_seek(AVIOContext *s, int64_t offset, int whence);\n\n/**\n * Skip given number of bytes forward\n * @return new position or AVERROR.\n */\nint64_t avio_skip(AVIOContext *s, int64_t offset);\n\n/**\n * ftell() equivalent for AVIOContext.\n * @return position or AVERROR.\n */\nstatic av_always_inline int64_t avio_tell(AVIOContext *s)\n{\n    return avio_seek(s, 0, SEEK_CUR);\n}\n\n/**\n * Get the filesize.\n * @return filesize or AVERROR\n */\nint64_t avio_size(AVIOContext *s);\n\n/**\n * feof() equivalent for AVIOContext.\n * @return non-zero if and only if end of file\n */\nint url_feof(AVIOContext *s);\n\n/** @warning currently size is limited */\nint avio_printf(AVIOContext *s, const char *fmt, ...) 
av_printf_format(2, 3);\n\n/**\n * Force flushing of buffered data to the output s.\n *\n * Force the buffered data to be immediately written to the output,\n * without waiting to fill the internal buffer.\n */\nvoid avio_flush(AVIOContext *s);\n\n/**\n * Read size bytes from AVIOContext into buf.\n * @return number of bytes read or AVERROR\n */\nint avio_read(AVIOContext *s, unsigned char *buf, int size);\n\n/**\n * @name Functions for reading from AVIOContext\n * @{\n *\n * @note these functions return 0 on EOF, so you cannot use them if EOF\n *       handling is necessary\n */\nint          avio_r8  (AVIOContext *s);\nunsigned int avio_rl16(AVIOContext *s);\nunsigned int avio_rl24(AVIOContext *s);\nunsigned int avio_rl32(AVIOContext *s);\nuint64_t     avio_rl64(AVIOContext *s);\nunsigned int avio_rb16(AVIOContext *s);\nunsigned int avio_rb24(AVIOContext *s);\nunsigned int avio_rb32(AVIOContext *s);\nuint64_t     avio_rb64(AVIOContext *s);\n/**\n * @}\n */\n\n/**\n * Read a string from pb into buf. The reading will terminate when either\n * a NULL character was encountered, maxlen bytes have been read, or nothing\n * more can be read from pb. The result is guaranteed to be NULL-terminated; it\n * will be truncated if buf is too small.\n * Note that the string is not interpreted or validated in any way, it\n * might get truncated in the middle of a sequence for multi-byte encodings.\n *\n * @return number of bytes read (is always <= maxlen).\n * If reading ends on EOF or error, the return value will be one more than\n * bytes actually read.\n */\nint avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen);\n\n/**\n * Read a UTF-16 string from pb and convert it to UTF-8.\n * The reading will terminate when either a null or invalid character was\n * encountered or maxlen bytes have been read.\n * @return number of bytes read (is always <= maxlen)\n */\nint avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen);\nint avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen);\n\n\n/**\n * @name URL open modes\n * The flags argument to avio_open must be one of the following\n * constants, optionally ORed with other flags.\n * @{\n */\n#define AVIO_FLAG_READ  1                                      /**< read-only */\n#define AVIO_FLAG_WRITE 2                                      /**< write-only */\n#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE)  /**< read-write pseudo flag */\n/**\n * @}\n */\n\n/**\n * Use non-blocking mode.\n * If this flag is set, operations on the context will return\n * AVERROR(EAGAIN) if they can not be performed immediately.\n * If this flag is not set, operations on the context will never return\n * AVERROR(EAGAIN).\n * Note that this flag does not affect the opening/connecting of the\n * context. Connecting a protocol will always block if necessary (e.g. on\n * network protocols) but never hang (e.g. 
on busy devices).\n * Warning: non-blocking protocols is work-in-progress; this flag may be\n * silently ignored.\n */\n#define AVIO_FLAG_NONBLOCK 8\n\n/**\n * Use direct mode.\n * avio_read and avio_write should if possible be satisfied directly\n * instead of going through a buffer, and avio_seek will always\n * call the underlying seek function directly.\n */\n#define AVIO_FLAG_DIRECT 0x8000\n\n/**\n * Create and initialize a AVIOContext for accessing the\n * resource indicated by url.\n * @note When the resource indicated by url has been opened in\n * read+write mode, the AVIOContext can be used only for writing.\n *\n * @param s Used to return the pointer to the created AVIOContext.\n * In case of failure the pointed to value is set to NULL.\n * @param url resource to access\n * @param flags flags which control how the resource indicated by url\n * is to be opened\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code in case of failure\n */\nint avio_open(AVIOContext **s, const char *url, int flags);\n\n/**\n * Create and initialize a AVIOContext for accessing the\n * resource indicated by url.\n * @note When the resource indicated by url has been opened in\n * read+write mode, the AVIOContext can be used only for writing.\n *\n * @param s Used to return the pointer to the created AVIOContext.\n * In case of failure the pointed to value is set to NULL.\n * @param url resource to access\n * @param flags flags which control how the resource indicated by url\n * is to be opened\n * @param int_cb an interrupt callback to be used at the protocols level\n * @param options  A dictionary filled with protocol-private options. On return\n * this parameter will be destroyed and replaced with a dict containing options\n * that were not found. May be NULL.\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code in case of failure\n */\nint avio_open2(AVIOContext **s, const char *url, int flags,\n               const AVIOInterruptCB *int_cb, AVDictionary **options);\n\n/**\n * Close the resource accessed by the AVIOContext s and free it.\n * This function can only be used if s was opened by avio_open().\n *\n * The internal buffer is automatically flushed before closing the\n * resource.\n *\n * @return 0 on success, an AVERROR < 0 on error.\n * @see avio_closep\n */\nint avio_close(AVIOContext *s);\n\n/**\n * Close the resource accessed by the AVIOContext *s, free it\n * and set the pointer pointing to it to NULL.\n * This function can only be used if s was opened by avio_open().\n *\n * The internal buffer is automatically flushed before closing the\n * resource.\n *\n * @return 0 on success, an AVERROR < 0 on error.\n * @see avio_close\n */\nint avio_closep(AVIOContext **s);\n\n\n/**\n * Open a write only memory stream.\n *\n * @param s new IO context\n * @return zero if no error.\n */\nint avio_open_dyn_buf(AVIOContext **s);\n\n/**\n * Return the written size and a pointer to the buffer. 
The buffer\n * must be freed with av_free().\n * Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer.\n *\n * @param s IO context\n * @param pbuffer pointer to a byte buffer\n * @return the length of the byte buffer\n */\nint avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);\n\n/**\n * Iterate through names of available protocols.\n *\n * @param opaque A private pointer representing current protocol.\n *        It must be a pointer to NULL on first iteration and will\n *        be updated by successive calls to avio_enum_protocols.\n * @param output If set to 1, iterate over output protocols,\n *               otherwise over input protocols.\n *\n * @return A static string containing the name of current protocol or NULL\n */\nconst char *avio_enum_protocols(void **opaque, int output);\n\n/**\n * Pause and resume playing - only meaningful if using a network streaming\n * protocol (e.g. MMS).\n *\n * @param h     IO context from which to call the read_pause function pointer\n * @param pause 1 for pause, 0 for resume\n */\nint     avio_pause(AVIOContext *h, int pause);\n\n/**\n * Seek to a given timestamp relative to some component stream.\n * Only meaningful if using a network streaming protocol (e.g. MMS.).\n *\n * @param h IO context from which to call the seek function pointers\n * @param stream_index The stream index that the timestamp is relative to.\n *        If stream_index is (-1) the timestamp should be in AV_TIME_BASE\n *        units from the beginning of the presentation.\n *        If a stream_index >= 0 is used and the protocol does not support\n *        seeking based on component streams, the call will fail.\n * @param timestamp timestamp in AVStream.time_base units\n *        or if there is no stream specified then in AV_TIME_BASE units.\n * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE\n *        and AVSEEK_FLAG_ANY. The protocol may silently ignore\n *        AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will\n *        fail if used and not supported.\n * @return >= 0 on success\n * @see AVInputFormat::read_seek\n */\nint64_t avio_seek_time(AVIOContext *h, int stream_index,\n                       int64_t timestamp, int flags);\n\n#endif /* AVFORMAT_AVIO_H */\n"
  },
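  {
    "path": "doc/examples/mem_io_sketch.c",
    "content": "/*\n * Illustrative sketch, not part of the FFmpeg distribution or the original\n * project: demuxing from a memory buffer through a custom read-only\n * AVIOContext, following the avio_alloc_context() documentation above. The\n * mem_reader struct, the read_mem() callback, the 4096-byte buffer size and\n * this file's path are assumptions made for the example; error paths are\n * simplified for brevity.\n */\n#include <string.h>\n#include <libavformat/avformat.h>\n#include <libavutil/mem.h>\n\nstruct mem_reader { const uint8_t *data; int size, pos; };\n\n/* read_packet callback: copy up to buf_size bytes out of the blob. */\nstatic int read_mem(void *opaque, uint8_t *buf, int buf_size)\n{\n    struct mem_reader *m = opaque;\n    int left = m->size - m->pos;\n\n    if (left <= 0)\n        return AVERROR_EOF;\n    if (buf_size > left)\n        buf_size = left;\n    memcpy(buf, m->data + m->pos, buf_size);\n    m->pos += buf_size;\n    return buf_size;\n}\n\nint demux_from_memory(const uint8_t *data, int size)\n{\n    struct mem_reader m = { data, size, 0 };\n    unsigned char *iobuf = av_malloc(4096); /* must come from av_malloc() */\n    AVFormatContext *fmt = avformat_alloc_context();\n    AVIOContext *pb;\n    AVPacket pkt;\n\n    if (!iobuf || !fmt)\n        return -1;\n\n    /* write_flag = 0 (read-only); read_mem() refills iobuf on demand. */\n    pb = avio_alloc_context(iobuf, 4096, 0, &m, read_mem, NULL, NULL);\n    if (!pb)\n        return -1;\n    fmt->pb = pb; /* custom IO: preallocate the context and set its pb field */\n\n    if (avformat_open_input(&fmt, \"in-memory\", NULL, NULL) < 0)\n        return -1;\n    while (av_read_frame(fmt, &pkt) >= 0)\n        av_free_packet(&pkt);\n\n    avformat_close_input(&fmt); /* does not free a caller-supplied pb */\n    av_free(pb->buffer);        /* the buffer may have been reallocated */\n    av_free(pb);\n    return 0;\n}\n"
  },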
  {
    "path": "src/3rdparty/ffmpeg/include/libavformat/version.h",
    "content": "/*\n * Version macros.\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFORMAT_VERSION_H\n#define AVFORMAT_VERSION_H\n\n/**\n * @file\n * @ingroup libavf\n * Libavformat version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVFORMAT_VERSION_MAJOR 55\n#define LIBAVFORMAT_VERSION_MINOR 33\n#define LIBAVFORMAT_VERSION_MICRO 100\n\n#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \\\n                                               LIBAVFORMAT_VERSION_MINOR, \\\n                                               LIBAVFORMAT_VERSION_MICRO)\n#define LIBAVFORMAT_VERSION     AV_VERSION(LIBAVFORMAT_VERSION_MAJOR,   \\\n                                           LIBAVFORMAT_VERSION_MINOR,   \\\n                                           LIBAVFORMAT_VERSION_MICRO)\n#define LIBAVFORMAT_BUILD       LIBAVFORMAT_VERSION_INT\n\n#define LIBAVFORMAT_IDENT       \"Lavf\" AV_STRINGIFY(LIBAVFORMAT_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n#ifndef FF_API_REFERENCE_DTS\n#define FF_API_REFERENCE_DTS            (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n\n#ifndef FF_API_ALLOC_OUTPUT_CONTEXT\n#define FF_API_ALLOC_OUTPUT_CONTEXT    (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_FORMAT_PARAMETERS\n#define FF_API_FORMAT_PARAMETERS       (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_NEW_STREAM\n#define FF_API_NEW_STREAM              (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_SET_PTS_INFO\n#define FF_API_SET_PTS_INFO            (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_CLOSE_INPUT_FILE\n#define FF_API_CLOSE_INPUT_FILE        (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_READ_PACKET\n#define FF_API_READ_PACKET             (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_ASS_SSA\n#define FF_API_ASS_SSA                 (LIBAVFORMAT_VERSION_MAJOR < 56)\n#endif\n#ifndef FF_API_R_FRAME_RATE\n#define FF_API_R_FRAME_RATE            1\n#endif\n#endif /* AVFORMAT_VERSION_H */\n"
  },
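  {
    "path": "doc/examples/lavf_version_sketch.c",
    "content": "/*\n * Illustrative sketch, not part of the FFmpeg distribution or the original\n * project: comparing the compile-time macros from version.h above against\n * the library loaded at runtime -- a useful sanity check when prebuilt\n * binaries are bundled with a project, since headers and DLLs can drift\n * apart. The helper name and this file's path are assumptions for the\n * example.\n */\n#include <stdio.h>\n#include <libavformat/avformat.h>\n\nint check_lavf_version(void)\n{\n    unsigned rt = avformat_version(); /* LIBAVFORMAT_VERSION_INT of the loaded library */\n\n    /* AV_VERSION_INT() packs major/minor/micro as (a << 16 | b << 8 | c). */\n    printf(\"built against lavf %d.%d.%d, running %u.%u.%u\\n\",\n           LIBAVFORMAT_VERSION_MAJOR, LIBAVFORMAT_VERSION_MINOR,\n           LIBAVFORMAT_VERSION_MICRO,\n           rt >> 16, (rt >> 8) & 0xff, rt & 0xff);\n\n    /* A differing major version means a possibly incompatible ABI. */\n    return (rt >> 16) == LIBAVFORMAT_VERSION_MAJOR ? 0 : -1;\n}\n"
  },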
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/adler32.h",
    "content": "/*\n * copyright (c) 2006 Mans Rullgard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_ADLER32_H\n#define AVUTIL_ADLER32_H\n\n#include <stdint.h>\n#include \"attributes.h\"\n\n/**\n * @file\n * Public header for libavutil Adler32 hasher\n *\n * @defgroup lavu_adler32 Adler32\n * @ingroup lavu_crypto\n * @{\n */\n\n/**\n * Calculate the Adler32 checksum of a buffer.\n *\n * Passing the return value to a subsequent av_adler32_update() call\n * allows the checksum of multiple buffers to be calculated as though\n * they were concatenated.\n *\n * @param adler initial checksum value\n * @param buf   pointer to input buffer\n * @param len   size of input buffer\n * @return      updated checksum\n */\nunsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,\n                                unsigned int len) av_pure;\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_ADLER32_H */\n"
  },
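  {
    "path": "doc/examples/adler32_sketch.c",
    "content": "/*\n * Illustrative sketch, not part of the FFmpeg distribution or the original\n * project: chunked checksumming with av_adler32_update(), chaining the\n * return value as described in adler32.h above so that the result matches a\n * single one-shot call. The initial value 1 is the conventional Adler-32\n * seed; the 4096-byte chunk size and this file's path are arbitrary choices\n * for the example.\n */\n#include <stdint.h>\n#include <libavutil/adler32.h>\n\nunsigned long adler32_in_chunks(const uint8_t *buf, unsigned int len)\n{\n    unsigned long sum = 1; /* Adler-32 checksums start from 1 */\n    unsigned int pos = 0;\n\n    while (pos < len) {\n        unsigned int n = len - pos < 4096 ? len - pos : 4096;\n        sum = av_adler32_update(sum, buf + pos, n); /* chain the chunks */\n        pos += n;\n    }\n    return sum; /* equals av_adler32_update(1, buf, len) */\n}\n"
  },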
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/aes.h",
    "content": "/*\n * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_AES_H\n#define AVUTIL_AES_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_aes AES\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_aes_size;\n\nstruct AVAES;\n\n/**\n * Allocate an AVAES context.\n */\nstruct AVAES *av_aes_alloc(void);\n\n/**\n * Initialize an AVAES context.\n * @param key_bits 128, 192 or 256\n * @param decrypt 0 for encryption, 1 for decryption\n */\nint av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n * @param count number of 16 byte blocks\n * @param dst destination array, can be equal to src\n * @param src source array, can be equal to dst\n * @param iv initialization vector for CBC mode, if NULL then ECB will be used\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_AES_H */\n"
  },
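  {
    "path": "examples/aes_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * one ECB round trip with the AVAES API from aes.h above. The all-zero\n * 128-bit key is for illustration only; a NULL iv selects ECB mode and\n * count is given in 16-byte blocks. Freeing the context with av_free() is\n * an assumption based on av_aes_alloc() using the av_malloc() family.\n */\n#include <string.h>\n#include <libavutil/aes.h>\n#include <libavutil/mem.h>\n\nint main(void)\n{\n    uint8_t key[16]   = {0};                   /* demo key, never use for real data */\n    uint8_t plain[16] = \"0123456789abcde\";     /* exactly one 16-byte block */\n    uint8_t enc[16], dec[16];\n    struct AVAES *aes = av_aes_alloc();\n\n    if (!aes)\n        return 1;\n    av_aes_init(aes, key, 128, 0);             /* 0 = prepare for encryption */\n    av_aes_crypt(aes, enc, plain, 1, NULL, 0);\n\n    av_aes_init(aes, key, 128, 1);             /* 1 = prepare for decryption */\n    av_aes_crypt(aes, dec, enc, 1, NULL, 1);\n\n    av_free(aes);\n    return memcmp(plain, dec, sizeof(plain)) != 0;\n}\n"
  },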
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/attributes.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Macro definitions for various function/variable attributes\n */\n\n#ifndef AVUTIL_ATTRIBUTES_H\n#define AVUTIL_ATTRIBUTES_H\n\n#ifdef __GNUC__\n#    define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y)\n#else\n#    define AV_GCC_VERSION_AT_LEAST(x,y) 0\n#endif\n\n#ifndef av_always_inline\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_always_inline __attribute__((always_inline)) inline\n#elif defined(_MSC_VER)\n#    define av_always_inline __forceinline\n#else\n#    define av_always_inline inline\n#endif\n#endif\n\n#ifndef av_extern_inline\n#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)\n#    define av_extern_inline extern inline\n#else\n#    define av_extern_inline inline\n#endif\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_noinline __attribute__((noinline))\n#elif defined(_MSC_VER)\n#    define av_noinline __declspec(noinline)\n#else\n#    define av_noinline\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_pure __attribute__((pure))\n#else\n#    define av_pure\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(2,6)\n#    define av_const __attribute__((const))\n#else\n#    define av_const\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(4,3)\n#    define av_cold __attribute__((cold))\n#else\n#    define av_cold\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)\n#    define av_flatten __attribute__((flatten))\n#else\n#    define av_flatten\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define attribute_deprecated __attribute__((deprecated))\n#elif defined(_MSC_VER)\n#    define attribute_deprecated __declspec(deprecated)\n#else\n#    define attribute_deprecated\n#endif\n\n/**\n * Disable warnings about deprecated features\n * This is useful for sections of code kept for backward compatibility and\n * scheduled for removal.\n */\n#ifndef AV_NOWARN_DEPRECATED\n#if AV_GCC_VERSION_AT_LEAST(4,6)\n#    define AV_NOWARN_DEPRECATED(code) \\\n        _Pragma(\"GCC diagnostic push\") \\\n        _Pragma(\"GCC diagnostic ignored \\\"-Wdeprecated-declarations\\\"\") \\\n        code \\\n        _Pragma(\"GCC diagnostic pop\")\n#elif defined(_MSC_VER)\n#    define AV_NOWARN_DEPRECATED(code) \\\n        __pragma(warning(push)) \\\n        __pragma(warning(disable : 4996)) \\\n        code; \\\n        __pragma(warning(pop))\n#else\n#    define AV_NOWARN_DEPRECATED(code) code\n#endif\n#endif\n\n\n#if defined(__GNUC__)\n#    define av_unused __attribute__((unused))\n#else\n#    define av_unused\n#endif\n\n/**\n * Mark a variable as used and prevent the compiler from optimizing it\n * away.  
This is useful for variables accessed only from inline\n * assembler without the compiler being aware.\n */\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_used __attribute__((used))\n#else\n#    define av_used\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,3)\n#   define av_alias __attribute__((may_alias))\n#else\n#   define av_alias\n#endif\n\n#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)\n#    define av_uninit(x) x=x\n#else\n#    define av_uninit(x) x\n#endif\n\n#ifdef __GNUC__\n#    define av_builtin_constant_p __builtin_constant_p\n#    define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))\n#else\n#    define av_builtin_constant_p(x) 0\n#    define av_printf_format(fmtpos, attrpos)\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(2,5)\n#    define av_noreturn __attribute__((noreturn))\n#else\n#    define av_noreturn\n#endif\n\n#endif /* AVUTIL_ATTRIBUTES_H */\n"
  },
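  {
    "path": "examples/attributes_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * two attribute macros from attributes.h above in ordinary use.\n * av_printf_format(2, 3) marks argument 2 as a printf-style format consumed\n * by the varargs starting at argument 3, so GCC/Clang type-check every call\n * site; av_unused silences \"unused variable\" warnings. Both expand to\n * nothing on compilers without the underlying attributes. The attribute\n * goes on a separate declaration, as in the header itself.\n */\n#include <stdio.h>\n#include <stdarg.h>\n#include <libavutil/attributes.h>\n\nstatic void log_line(int level, const char *fmt, ...) av_printf_format(2, 3);\n\nstatic void log_line(int level, const char *fmt, ...)\n{\n    va_list ap;\n    va_start(ap, fmt);\n    fprintf(stderr, \"[%d] \", level);\n    vfprintf(stderr, fmt, ap);\n    va_end(ap);\n}\n\nint main(void)\n{\n    av_unused int only_for_debug = 42;   /* no warning even with -Wunused */\n    log_line(1, \"value: %d\\n\", 7);       /* a mismatched %d would now warn */\n    return 0;\n}\n"
  },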
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/audio_fifo.h",
    "content": "/*\n * Audio FIFO\n * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Audio FIFO Buffer\n */\n\n#ifndef AVUTIL_AUDIO_FIFO_H\n#define AVUTIL_AUDIO_FIFO_H\n\n#include \"avutil.h\"\n#include \"fifo.h\"\n#include \"samplefmt.h\"\n\n/**\n * @addtogroup lavu_audio\n * @{\n */\n\n/**\n * Context for an Audio FIFO Buffer.\n *\n * - Operates at the sample level rather than the byte level.\n * - Supports multiple channels with either planar or packed sample format.\n * - Automatic reallocation when writing to a full buffer.\n */\ntypedef struct AVAudioFifo AVAudioFifo;\n\n/**\n * Free an AVAudioFifo.\n *\n * @param af  AVAudioFifo to free\n */\nvoid av_audio_fifo_free(AVAudioFifo *af);\n\n/**\n * Allocate an AVAudioFifo.\n *\n * @param sample_fmt  sample format\n * @param channels    number of channels\n * @param nb_samples  initial allocation size, in samples\n * @return            newly allocated AVAudioFifo, or NULL on error\n */\nAVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,\n                                 int nb_samples);\n\n/**\n * Reallocate an AVAudioFifo.\n *\n * @param af          AVAudioFifo to reallocate\n * @param nb_samples  new allocation size, in samples\n * @return            0 if OK, or negative AVERROR code on failure\n */\nint av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);\n\n/**\n * Write data to an AVAudioFifo.\n *\n * The AVAudioFifo will be reallocated automatically if the available space\n * is less than nb_samples.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param af          AVAudioFifo to write to\n * @param data        audio data plane pointers\n * @param nb_samples  number of samples to write\n * @return            number of samples actually written, or negative AVERROR\n *                    code on failure. If successful, the number of samples\n *                    actually written will always be nb_samples.\n */\nint av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);\n\n/**\n * Read data from an AVAudioFifo.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param af          AVAudioFifo to read from\n * @param data        audio data plane pointers\n * @param nb_samples  number of samples to read\n * @return            number of samples actually read, or negative AVERROR code\n *                    on failure. 
The number of samples actually read will not\n *                    be greater than nb_samples, and will only be less than\n *                    nb_samples if av_audio_fifo_size is less than nb_samples.\n */\nint av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);\n\n/**\n * Drain data from an AVAudioFifo.\n *\n * Removes the data without reading it.\n *\n * @param af          AVAudioFifo to drain\n * @param nb_samples  number of samples to drain\n * @return            0 if OK, or negative AVERROR code on failure\n */\nint av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);\n\n/**\n * Reset the AVAudioFifo buffer.\n *\n * This empties all data in the buffer.\n *\n * @param af  AVAudioFifo to reset\n */\nvoid av_audio_fifo_reset(AVAudioFifo *af);\n\n/**\n * Get the current number of samples in the AVAudioFifo available for reading.\n *\n * @param af  the AVAudioFifo to query\n * @return    number of samples available for reading\n */\nint av_audio_fifo_size(AVAudioFifo *af);\n\n/**\n * Get the current number of samples in the AVAudioFifo available for writing.\n *\n * @param af  the AVAudioFifo to query\n * @return    number of samples available for writing\n */\nint av_audio_fifo_space(AVAudioFifo *af);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_AUDIO_FIFO_H */\n"
  },
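  {
    "path": "examples/audio_fifo_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * queueing interleaved 16-bit stereo through the AVAudioFifo API from\n * audio_fifo.h above. Packed (non-planar) formats use a single data plane,\n * so each data array holds one pointer.\n */\n#include <stdint.h>\n#include <stdio.h>\n#include <libavutil/audio_fifo.h>\n\nint main(void)\n{\n    AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 512);\n    int16_t in[256 * 2] = {0}, out[256 * 2];\n    void *in_planes[]   = { in };\n    void *out_planes[]  = { out };\n\n    if (!fifo)\n        return 1;\n    av_audio_fifo_write(fifo, in_planes, 256);                /* grows if full */\n    printf(\"queued: %d samples\\n\", av_audio_fifo_size(fifo)); /* 256 */\n\n    av_audio_fifo_read(fifo, out_planes, 128);\n    printf(\"left:   %d samples\\n\", av_audio_fifo_size(fifo)); /* 128 */\n\n    av_audio_fifo_free(fifo);\n    return 0;\n}\n"
  },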
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/audioconvert.h",
    "content": "\n#include \"version.h\"\n\n#if FF_API_AUDIOCONVERT\n#include \"channel_layout.h\"\n#endif\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/avassert.h",
    "content": "/*\n * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * simple assert() macros that are a bit more flexible than ISO C assert().\n * @author Michael Niedermayer <michaelni@gmx.at>\n */\n\n#ifndef AVUTIL_AVASSERT_H\n#define AVUTIL_AVASSERT_H\n\n#include <stdlib.h>\n#include \"avutil.h\"\n#include \"log.h\"\n\n/**\n * assert() equivalent, that is always enabled.\n */\n#define av_assert0(cond) do {                                           \\\n    if (!(cond)) {                                                      \\\n        av_log(NULL, AV_LOG_PANIC, \"Assertion %s failed at %s:%d\\n\",    \\\n               AV_STRINGIFY(cond), __FILE__, __LINE__);                 \\\n        abort();                                                        \\\n    }                                                                   \\\n} while (0)\n\n\n/**\n * assert() equivalent, that does not lie in speed critical code.\n * These asserts() thus can be enabled without fearing speedloss.\n */\n#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0\n#define av_assert1(cond) av_assert0(cond)\n#else\n#define av_assert1(cond) ((void)0)\n#endif\n\n\n/**\n * assert() equivalent, that does lie in speed critical code.\n */\n#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1\n#define av_assert2(cond) av_assert0(cond)\n#else\n#define av_assert2(cond) ((void)0)\n#endif\n\n#endif /* AVUTIL_AVASSERT_H */\n"
  },
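  {
    "path": "examples/avassert_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * the three assert levels from avassert.h above. av_assert0() is always\n * compiled in; av_assert1() and av_assert2() become no-ops unless\n * ASSERT_LEVEL is defined high enough at build time.\n */\n#include <libavutil/avassert.h>\n\nint main(void)\n{\n    int frames = 1;\n\n    av_assert0(frames >= 0);   /* always checked; logs and abort()s on failure */\n    av_assert1(frames >= 0);   /* checked only when ASSERT_LEVEL > 0 */\n    av_assert2(frames >= 0);   /* checked only when ASSERT_LEVEL > 1 */\n    return 0;\n}\n"
  },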
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/avconfig.h",
    "content": "/* Generated by ffconf */\n#ifndef AVUTIL_AVCONFIG_H\n#define AVUTIL_AVCONFIG_H\n#define AV_HAVE_BIGENDIAN 0\n#define AV_HAVE_FAST_UNALIGNED 1\n#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0\n#define AV_HAVE_INCOMPATIBLE_FORK_ABI 0\n#endif /* AVUTIL_AVCONFIG_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/avstring.h",
    "content": "/*\n * Copyright (c) 2007 Mans Rullgard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_AVSTRING_H\n#define AVUTIL_AVSTRING_H\n\n#include <stddef.h>\n#include <stdint.h>\n#include \"attributes.h\"\n\n/**\n * @addtogroup lavu_string\n * @{\n */\n\n/**\n * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to\n * the address of the first character in str after the prefix.\n *\n * @param str input string\n * @param pfx prefix to test\n * @param ptr updated if the prefix is matched inside str\n * @return non-zero if the prefix matches, zero otherwise\n */\nint av_strstart(const char *str, const char *pfx, const char **ptr);\n\n/**\n * Return non-zero if pfx is a prefix of str independent of case. If\n * it is, *ptr is set to the address of the first character in str\n * after the prefix.\n *\n * @param str input string\n * @param pfx prefix to test\n * @param ptr updated if the prefix is matched inside str\n * @return non-zero if the prefix matches, zero otherwise\n */\nint av_stristart(const char *str, const char *pfx, const char **ptr);\n\n/**\n * Locate the first case-independent occurrence in the string haystack\n * of the string needle.  A zero-length string needle is considered to\n * match at the start of haystack.\n *\n * This function is a case-insensitive version of the standard strstr().\n *\n * @param haystack string to search in\n * @param needle   string to search for\n * @return         pointer to the located match within haystack\n *                 or a null pointer if no match\n */\nchar *av_stristr(const char *haystack, const char *needle);\n\n/**\n * Locate the first occurrence of the string needle in the string haystack\n * where not more than hay_length characters are searched. 
A zero-length\n * string needle is considered to match at the start of haystack.\n *\n * This function is a length-limited version of the standard strstr().\n *\n * @param haystack   string to search in\n * @param needle     string to search for\n * @param hay_length length of string to search in\n * @return           pointer to the located match within haystack\n *                   or a null pointer if no match\n */\nchar *av_strnstr(const char *haystack, const char *needle, size_t hay_length);\n\n/**\n * Copy the string src to dst, but no more than size - 1 bytes, and\n * null-terminate dst.\n *\n * This function is the same as BSD strlcpy().\n *\n * @param dst destination buffer\n * @param src source string\n * @param size size of destination buffer\n * @return the length of src\n *\n * @warning since the return value is the length of src, src absolutely\n * _must_ be a properly 0-terminated string, otherwise this will read beyond\n * the end of the buffer and possibly crash.\n */\nsize_t av_strlcpy(char *dst, const char *src, size_t size);\n\n/**\n * Append the string src to the string dst, but to a total length of\n * no more than size - 1 bytes, and null-terminate dst.\n *\n * This function is similar to BSD strlcat(), but differs when\n * size <= strlen(dst).\n *\n * @param dst destination buffer\n * @param src source string\n * @param size size of destination buffer\n * @return the total length of src and dst\n *\n * @warning since the return value use the length of src and dst, these\n * absolutely _must_ be a properly 0-terminated strings, otherwise this\n * will read beyond the end of the buffer and possibly crash.\n */\nsize_t av_strlcat(char *dst, const char *src, size_t size);\n\n/**\n * Append output to a string, according to a format. Never write out of\n * the destination buffer, and always put a terminating 0 within\n * the buffer.\n * @param dst destination buffer (string to which the output is\n *  appended)\n * @param size total size of the destination buffer\n * @param fmt printf-compatible format string, specifying how the\n *  following parameters are used\n * @return the length of the string that would have been generated\n *  if enough space had been available\n */\nsize_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);\n\n/**\n * Get the count of continuous non zero chars starting from the beginning.\n *\n * @param len maximum number of characters to check in the string, that\n *            is the maximum value which is returned by the function\n */\nstatic inline size_t av_strnlen(const char *s, size_t len)\n{\n    size_t i;\n    for (i = 0; i < len && s[i]; i++)\n        ;\n    return i;\n}\n\n/**\n * Print arguments following specified format into a large enough auto\n * allocated buffer. It is similar to GNU asprintf().\n * @param fmt printf-compatible format string, specifying how the\n *            following parameters are used.\n * @return the allocated string\n * @note You have to free the string yourself with av_free().\n */\nchar *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);\n\n/**\n * Convert a number to a av_malloced string.\n */\nchar *av_d2str(double d);\n\n/**\n * Unescape the given string until a non escaped terminating char,\n * and return the token corresponding to the unescaped string.\n *\n * The normal \\ and ' escaping is supported. 
Leading and trailing\n * whitespaces are removed, unless they are escaped with '\\' or are\n * enclosed between ''.\n *\n * @param buf the buffer to parse, buf will be updated to point to the\n * terminating char\n * @param term a 0-terminated list of terminating chars\n * @return the malloced unescaped string, which must be av_freed by\n * the user, NULL in case of allocation failure\n */\nchar *av_get_token(const char **buf, const char *term);\n\n/**\n * Split the string into several tokens which can be accessed by\n * successive calls to av_strtok().\n *\n * A token is defined as a sequence of characters not belonging to the\n * set specified in delim.\n *\n * On the first call to av_strtok(), s should point to the string to\n * parse, and the value of saveptr is ignored. In subsequent calls, s\n * should be NULL, and saveptr should be unchanged since the previous\n * call.\n *\n * This function is similar to strtok_r() defined in POSIX.1.\n *\n * @param s the string to parse, may be NULL\n * @param delim 0-terminated list of token delimiters, must be non-NULL\n * @param saveptr user-provided pointer which points to stored\n * information necessary for av_strtok() to continue scanning the same\n * string. saveptr is updated to point to the next character after the\n * first delimiter found, or to NULL if the string was terminated\n * @return the found token, or NULL when no token is found\n */\nchar *av_strtok(char *s, const char *delim, char **saveptr);\n\n/**\n * Locale-independent conversion of ASCII isdigit.\n */\nint av_isdigit(int c);\n\n/**\n * Locale-independent conversion of ASCII isgraph.\n */\nint av_isgraph(int c);\n\n/**\n * Locale-independent conversion of ASCII isspace.\n */\nint av_isspace(int c);\n\n/**\n * Locale-independent conversion of ASCII characters to uppercase.\n */\nstatic inline int av_toupper(int c)\n{\n    if (c >= 'a' && c <= 'z')\n        c ^= 0x20;\n    return c;\n}\n\n/**\n * Locale-independent conversion of ASCII characters to lowercase.\n */\nstatic inline int av_tolower(int c)\n{\n    if (c >= 'A' && c <= 'Z')\n        c ^= 0x20;\n    return c;\n}\n\n/**\n * Locale-independent conversion of ASCII isxdigit.\n */\nint av_isxdigit(int c);\n\n/**\n * Locale-independent case-insensitive compare.\n * @note This means only ASCII-range characters are case-insensitive\n */\nint av_strcasecmp(const char *a, const char *b);\n\n/**\n * Locale-independent case-insensitive compare.\n * @note This means only ASCII-range characters are case-insensitive\n */\nint av_strncasecmp(const char *a, const char *b, size_t n);\n\n\n/**\n * Thread safe basename.\n * @param path the path, on DOS both \\ and / are considered separators.\n * @return pointer to the basename substring.\n */\nconst char *av_basename(const char *path);\n\n/**\n * Thread safe dirname.\n * @param path the path, on DOS both \\ and / are considered separators.\n * @return the path with the separator replaced by the string terminator or \".\".\n * @note the function may change the input string.\n */\nconst char *av_dirname(char *path);\n\nenum AVEscapeMode {\n    AV_ESCAPE_MODE_AUTO,      ///< Use auto-selected escaping mode.\n    AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping.\n    AV_ESCAPE_MODE_QUOTE,     ///< Use single-quote escaping.\n};\n\n/**\n * Consider spaces special and escape them even in the middle of the\n * string.\n *\n * This is equivalent to adding the whitespace characters to the special\n * characters lists, except it is guaranteed to use the exact same list\n * of whitespace 
characters as the rest of libavutil.\n */\n#define AV_ESCAPE_FLAG_WHITESPACE 0x01\n\n/**\n * Escape only specified special characters.\n * Without this flag, escape also any characters that may be considered\n * special by av_get_token(), such as the single quote.\n */\n#define AV_ESCAPE_FLAG_STRICT 0x02\n\n/**\n * Escape string in src, and put the escaped string in an allocated\n * string in *dst, which must be freed with av_free().\n *\n * @param dst           pointer where an allocated string is put\n * @param src           string to escape, must be non-NULL\n * @param special_chars string containing the special characters which\n *                      need to be escaped, can be NULL\n * @param mode          escape mode to employ, see AV_ESCAPE_MODE_* macros.\n *                      Any unknown value for mode will be considered equivalent to\n *                      AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without\n *                      notice.\n * @param flags         flags which control how to escape, see AV_ESCAPE_FLAG_ macros\n * @return the length of the allocated string, or a negative error code in case of error\n * @see av_bprint_escape()\n */\nint av_escape(char **dst, const char *src, const char *special_chars,\n              enum AVEscapeMode mode, int flags);\n\n#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES          1 ///< accept codepoints over 0x10FFFF\n#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS             2 ///< accept non-characters - 0xFFFE and 0xFFFF\n#define AV_UTF8_FLAG_ACCEPT_SURROGATES                 4 ///< accept UTF-16 surrogates codes\n#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML\n\n#define AV_UTF8_FLAG_ACCEPT_ALL \\\n    AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES\n\n/**\n * Read and decode a single UTF-8 code point (character) from the\n * buffer in *buf, and update *buf to point to the next byte to\n * decode.\n *\n * In case of an invalid byte sequence, the pointer will be updated to\n * the next byte after the invalid sequence and the function will\n * return an error code.\n *\n * Depending on the specified flags, the function will also fail in\n * case the decoded code point does not belong to a valid range.\n *\n * @note For speed-relevant code a carefully implemented use of\n * GET_UTF8() may be preferred.\n *\n * @param codep   pointer used to return the parsed code in case of success.\n *                The value in *codep is set even in case the range check fails.\n * @param bufp    pointer to the address the first byte of the sequence\n *                to decode, updated by the function to point to the\n *                byte next after the decoded sequence\n * @param buf_end pointer to the end of the buffer, points to the next\n *                byte past the last in the buffer. This is used to\n *                avoid buffer overreads (in case of an unfinished\n *                UTF-8 sequence towards the end of the buffer).\n * @param flags   a collection of AV_UTF8_FLAG_* flags\n * @return >= 0 in case a sequence was successfully read, a negative\n * value in case of invalid sequence\n */\nint av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,\n                   unsigned int flags);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_AVSTRING_H */\n"
  },
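  {
    "path": "examples/avstring_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * a few of the avstring.h helpers above. av_strtok() mutates its input and\n * keeps its scanning state in a caller-provided pointer, so unlike plain\n * strtok() it is safe to use from multiple threads.\n */\n#include <stdio.h>\n#include <libavutil/avstring.h>\n\nint main(void)\n{\n    char  line[] = \"width=640;height=360\";   /* modified in place */\n    char *save   = NULL;\n    char *tok;\n    const char *rest;\n\n    for (tok = av_strtok(line, \";\", &save); tok; tok = av_strtok(NULL, \";\", &save))\n        printf(\"field: %s\\n\", tok);\n\n    if (av_strstart(\"rtsp://example/stream\", \"rtsp://\", &rest))\n        printf(\"after prefix: %s\\n\", rest);\n\n    printf(\"case-insensitive: %d\\n\", av_strcasecmp(\"ABC\", \"abc\"));  /* 0 */\n    return 0;\n}\n"
  },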
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/avutil.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_AVUTIL_H\n#define AVUTIL_AVUTIL_H\n\n/**\n * @file\n * external API header\n */\n\n/**\n * @mainpage\n *\n * @section ffmpeg_intro Introduction\n *\n * This document describes the usage of the different libraries\n * provided by FFmpeg.\n *\n * @li @ref libavc \"libavcodec\" encoding/decoding library\n * @li @ref lavfi \"libavfilter\" graph-based frame editing library\n * @li @ref libavf \"libavformat\" I/O and muxing/demuxing library\n * @li @ref lavd \"libavdevice\" special devices muxing/demuxing library\n * @li @ref lavu \"libavutil\" common utility library\n * @li @ref lswr \"libswresample\" audio resampling, format conversion and mixing\n * @li @ref lpp  \"libpostproc\" post processing library\n * @li @ref libsws \"libswscale\" color conversion and scaling library\n *\n * @section ffmpeg_versioning Versioning and compatibility\n *\n * Each of the FFmpeg libraries contains a version.h header, which defines a\n * major, minor and micro version number with the\n * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version\n * number is incremented with backward incompatible changes - e.g. removing\n * parts of the public API, reordering public struct members, etc. The minor\n * version number is incremented for backward compatible API changes or major\n * new features - e.g. adding a new public function or a new decoder. The micro\n * version number is incremented for smaller changes that a calling program\n * might still want to check for - e.g. changing behavior in a previously\n * unspecified situation.\n *\n * FFmpeg guarantees backward API and ABI compatibility for each library as long\n * as its major version number is unchanged. This means that no public symbols\n * will be removed or renamed. Types and names of the public struct members and\n * values of public macros and enums will remain the same (unless they were\n * explicitly declared as not part of the public API). Documented behavior will\n * not change.\n *\n * In other words, any correct program that works with a given FFmpeg snapshot\n * should work just as well without any changes with any later snapshot with the\n * same major versions. This applies to both rebuilding the program against new\n * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program\n * links against.\n *\n * However, new public symbols may be added and new members may be appended to\n * public structs whose size is not part of public ABI (most public structs in\n * FFmpeg). New macros and enum values may be added. Behavior in undocumented\n * situations may change slightly (and be documented). 
All those are accompanied\n * by an entry in doc/APIchanges and incrementing either the minor or micro\n * version number.\n */\n\n/**\n * @defgroup lavu Common utility functions\n *\n * @brief\n * libavutil contains the code shared across all the other FFmpeg\n * libraries\n *\n * @note In order to use the functions provided by avutil you must include\n * the specific header.\n *\n * @{\n *\n * @defgroup lavu_crypto Crypto and Hashing\n *\n * @{\n * @}\n *\n * @defgroup lavu_math Maths\n * @{\n *\n * @}\n *\n * @defgroup lavu_string String Manipulation\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_mem Memory Management\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_data Data Structures\n * @{\n *\n * @}\n *\n * @defgroup lavu_audio Audio related\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_error Error Codes\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_log Logging Facility\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_misc Other\n *\n * @{\n *\n * @defgroup lavu_internal Internal\n *\n * Not exported functions, for internal usage only\n *\n * @{\n *\n * @}\n *\n * @defgroup preproc_misc Preprocessor String Macros\n *\n * @{\n *\n * @}\n */\n\n\n/**\n * @addtogroup lavu_ver\n * @{\n */\n\n/**\n * Return the LIBAVUTIL_VERSION_INT constant.\n */\nunsigned avutil_version(void);\n\n/**\n * Return the libavutil build-time configuration.\n */\nconst char *avutil_configuration(void);\n\n/**\n * Return the libavutil license.\n */\nconst char *avutil_license(void);\n\n/**\n * @}\n */\n\n/**\n * @addtogroup lavu_media Media Type\n * @brief Media Type\n */\n\nenum AVMediaType {\n    AVMEDIA_TYPE_UNKNOWN = -1,  ///< Usually treated as AVMEDIA_TYPE_DATA\n    AVMEDIA_TYPE_VIDEO,\n    AVMEDIA_TYPE_AUDIO,\n    AVMEDIA_TYPE_DATA,          ///< Opaque data information usually continuous\n    AVMEDIA_TYPE_SUBTITLE,\n    AVMEDIA_TYPE_ATTACHMENT,    ///< Opaque data information usually sparse\n    AVMEDIA_TYPE_NB\n};\n\n/**\n * Return a string describing the media_type enum, NULL if media_type\n * is unknown.\n */\nconst char *av_get_media_type_string(enum AVMediaType media_type);\n\n/**\n * @defgroup lavu_const Constants\n * @{\n *\n * @defgroup lavu_enc Encoding specific\n *\n * @note those definition should move to avcodec\n * @{\n */\n\n#define FF_LAMBDA_SHIFT 7\n#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)\n#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda\n#define FF_LAMBDA_MAX (256*128-1)\n\n#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove\n\n/**\n * @}\n * @defgroup lavu_time Timestamp specific\n *\n * FFmpeg internal timebase and timestamp definitions\n *\n * @{\n */\n\n/**\n * @brief Undefined timestamp value\n *\n * Usually reported by demuxer that work on containers that do not provide\n * either pts or dts.\n */\n\n#define AV_NOPTS_VALUE          ((int64_t)UINT64_C(0x8000000000000000))\n\n/**\n * Internal time base represented as integer\n */\n\n#define AV_TIME_BASE            1000000\n\n/**\n * Internal time base represented as fractional value\n */\n\n#define AV_TIME_BASE_Q          (AVRational){1, AV_TIME_BASE}\n\n/**\n * @}\n * @}\n * @defgroup lavu_picture Image related\n *\n * AVPicture types, pixel formats and basic image planes manipulation.\n *\n * @{\n */\n\nenum AVPictureType {\n    AV_PICTURE_TYPE_NONE = 0, ///< Undefined\n    AV_PICTURE_TYPE_I,     ///< Intra\n    AV_PICTURE_TYPE_P,     ///< Predicted\n    AV_PICTURE_TYPE_B,     ///< Bi-dir predicted\n    AV_PICTURE_TYPE_S,     ///< S(GMC)-VOP MPEG4\n    AV_PICTURE_TYPE_SI,    ///< Switching Intra\n    
AV_PICTURE_TYPE_SP,    ///< Switching Predicted\n    AV_PICTURE_TYPE_BI,    ///< BI type\n};\n\n/**\n * Return a single letter to describe the given picture type\n * pict_type.\n *\n * @param[in] pict_type the picture type @return a single character\n * representing the picture type, '?' if pict_type is unknown\n */\nchar av_get_picture_type_char(enum AVPictureType pict_type);\n\n/**\n * @}\n */\n\n#include \"common.h\"\n#include \"error.h\"\n#include \"version.h\"\n#include \"macros.h\"\n#include \"mathematics.h\"\n#include \"rational.h\"\n#include \"log.h\"\n#include \"pixfmt.h\"\n\n/**\n * Return x default pointer in case p is NULL.\n */\nstatic inline void *av_x_if_null(const void *p, const void *x)\n{\n    return (void *)(intptr_t)(p ? p : x);\n}\n\n/**\n * Compute the length of an integer list.\n *\n * @param elsize  size in bytes of each list element (only 1, 2, 4 or 8)\n * @param term    list terminator (usually 0 or -1)\n * @param list    pointer to the list\n * @return  length of the list, in elements, not counting the terminator\n */\nunsigned av_int_list_length_for_size(unsigned elsize,\n                                     const void *list, uint64_t term) av_pure;\n\n/**\n * Compute the length of an integer list.\n *\n * @param term  list terminator (usually 0 or -1)\n * @param list  pointer to the list\n * @return  length of the list, in elements, not counting the terminator\n */\n#define av_int_list_length(list, term) \\\n    av_int_list_length_for_size(sizeof(*(list)), list, term)\n\n/**\n * Open a file using a UTF-8 filename.\n * The API of this function matches POSIX fopen(), errors are returned through\n * errno.\n */\nFILE *av_fopen_utf8(const char *path, const char *mode);\n\n/**\n * @}\n * @}\n */\n\n#endif /* AVUTIL_AVUTIL_H */\n"
  },
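  {
    "path": "examples/avutil_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * the version and media-type helpers from avutil.h above. The packed\n * version integer holding major/minor/micro as (major<<16)|(minor<<8)|micro\n * follows FFmpeg's AV_VERSION_INT() convention.\n */\n#include <stdio.h>\n#include <libavutil/avutil.h>\n\nint main(void)\n{\n    unsigned v = avutil_version();\n\n    printf(\"libavutil %u.%u.%u\\n\", v >> 16, (v >> 8) & 0xff, v & 0xff);\n    printf(\"configuration: %s\\n\", avutil_configuration());\n    printf(\"AVMEDIA_TYPE_VIDEO -> %s\\n\",\n           av_get_media_type_string(AVMEDIA_TYPE_VIDEO));   /* \"video\" */\n    return 0;\n}\n"
  },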
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/base64.h",
    "content": "/*\n * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_BASE64_H\n#define AVUTIL_BASE64_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_base64 Base64\n * @ingroup lavu_crypto\n * @{\n */\n\n\n/**\n * Decode a base64-encoded string.\n *\n * @param out      buffer for decoded data\n * @param in       null-terminated input string\n * @param out_size size in bytes of the out buffer, must be at\n *                 least 3/4 of the length of in\n * @return         number of bytes written, or a negative value in case of\n *                 invalid input\n */\nint av_base64_decode(uint8_t *out, const char *in, int out_size);\n\n/**\n * Encode data to base64 and null-terminate.\n *\n * @param out      buffer for encoded data\n * @param out_size size in bytes of the out buffer (including the\n *                 null terminator), must be at least AV_BASE64_SIZE(in_size)\n * @param in       input buffer containing the data to encode\n * @param in_size  size in bytes of the in buffer\n * @return         out or NULL in case of error\n */\nchar *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);\n\n/**\n * Calculate the output size needed to base64-encode x bytes to a\n * null-terminated string.\n */\n#define AV_BASE64_SIZE(x)  (((x)+2) / 3 * 4 + 1)\n\n /**\n  * @}\n  */\n\n#endif /* AVUTIL_BASE64_H */\n"
  },
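  {
    "path": "examples/base64_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * a base64 round trip with the API from base64.h above. AV_BASE64_SIZE()\n * gives the worst-case encoded size including the terminating 0, and the\n * decode buffer must be at least 3/4 of the encoded length.\n */\n#include <stdio.h>\n#include <string.h>\n#include <libavutil/base64.h>\n\nint main(void)\n{\n    const uint8_t raw[] = \"binary\\0data\";   /* 12 bytes incl. embedded 0 */\n    char          enc[AV_BASE64_SIZE(sizeof(raw))];\n    uint8_t       dec[sizeof(raw)];\n    int           n;\n\n    if (!av_base64_encode(enc, sizeof(enc), raw, sizeof(raw)))\n        return 1;\n    printf(\"encoded: %s\\n\", enc);\n\n    n = av_base64_decode(dec, enc, sizeof(dec));\n    return n != (int)sizeof(raw) || memcmp(dec, raw, n) != 0;\n}\n"
  },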
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/blowfish.h",
    "content": "/*\n * Blowfish algorithm\n * Copyright (c) 2012 Samuel Pitoiset\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_BLOWFISH_H\n#define AVUTIL_BLOWFISH_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_blowfish Blowfish\n * @ingroup lavu_crypto\n * @{\n */\n\n#define AV_BF_ROUNDS 16\n\ntypedef struct AVBlowfish {\n    uint32_t p[AV_BF_ROUNDS + 2];\n    uint32_t s[4][256];\n} AVBlowfish;\n\n/**\n * Initialize an AVBlowfish context.\n *\n * @param ctx an AVBlowfish context\n * @param key a key\n * @param key_len length of the key\n */\nvoid av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n *\n * @param ctx an AVBlowfish context\n * @param xl left four bytes halves of input to be encrypted\n * @param xr right four bytes halves of input to be encrypted\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,\n                           int decrypt);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n *\n * @param ctx an AVBlowfish context\n * @param dst destination array, can be equal to src\n * @param src source array, can be equal to dst\n * @param count number of 8 byte blocks\n * @param iv initialization vector for CBC mode, if NULL ECB will be used\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,\n                       int count, uint8_t *iv, int decrypt);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_BLOWFISH_H */\n"
  },
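  {
    "path": "examples/blowfish_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * one 64-bit ECB round trip with the Blowfish API from blowfish.h above.\n * av_blowfish_crypt_ecb() transforms the two 32-bit halves in place.\n */\n#include <string.h>\n#include <libavutil/blowfish.h>\n\nint main(void)\n{\n    AVBlowfish    ctx;\n    const uint8_t key[] = \"demo key only\";\n    uint32_t      xl = 0x01234567, xr = 0x89abcdef;\n    uint32_t      l0 = xl,         r0 = xr;\n\n    av_blowfish_init(&ctx, key, (int)strlen((const char *)key));\n    av_blowfish_crypt_ecb(&ctx, &xl, &xr, 0);   /* encrypt in place */\n    av_blowfish_crypt_ecb(&ctx, &xl, &xr, 1);   /* decrypt in place */\n\n    return xl != l0 || xr != r0;   /* 0: round trip succeeded */\n}\n"
  },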
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/bprint.h",
    "content": "/*\n * Copyright (c) 2012 Nicolas George\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_BPRINT_H\n#define AVUTIL_BPRINT_H\n\n#include <stdarg.h>\n\n#include \"attributes.h\"\n#include \"avstring.h\"\n\n/**\n * Define a structure with extra padding to a fixed size\n * This helps ensuring binary compatibility with future versions.\n */\n#define FF_PAD_STRUCTURE(size, ...) \\\n    __VA_ARGS__ \\\n    char reserved_padding[size - sizeof(struct { __VA_ARGS__ })];\n\n/**\n * Buffer to print data progressively\n *\n * The string buffer grows as necessary and is always 0-terminated.\n * The content of the string is never accessed, and thus is\n * encoding-agnostic and can even hold binary data.\n *\n * Small buffers are kept in the structure itself, and thus require no\n * memory allocation at all (unless the contents of the buffer is needed\n * after the structure goes out of scope). This is almost as lightweight as\n * declaring a local \"char buf[512]\".\n *\n * The length of the string can go beyond the allocated size: the buffer is\n * then truncated, but the functions still keep account of the actual total\n * length.\n *\n * In other words, buf->len can be greater than buf->size and records the\n * total length of what would have been to the buffer if there had been\n * enough memory.\n *\n * Append operations do not need to be tested for failure: if a memory\n * allocation fails, data stop being appended to the buffer, but the length\n * is still updated. This situation can be tested with\n * av_bprint_is_complete().\n *\n * The size_max field determines several possible behaviours:\n *\n * size_max = -1 (= UINT_MAX) or any large value will let the buffer be\n * reallocated as necessary, with an amortized linear cost.\n *\n * size_max = 0 prevents writing anything to the buffer: only the total\n * length is computed. The write operations can then possibly be repeated in\n * a buffer with exactly the necessary size\n * (using size_init = size_max = len + 1).\n *\n * size_max = 1 is automatically replaced by the exact size available in the\n * structure itself, thus ensuring no dynamic memory allocation. 
The\n * internal buffer is large enough to hold a reasonable paragraph of text,\n * such as the current paragraph.\n */\ntypedef struct AVBPrint {\n    FF_PAD_STRUCTURE(1024,\n    char *str;         /**< string so far */\n    unsigned len;      /**< length so far */\n    unsigned size;     /**< allocated memory */\n    unsigned size_max; /**< maximum allocated memory */\n    char reserved_internal_buffer[1];\n    )\n} AVBPrint;\n\n/**\n * Convenience macros for special values for av_bprint_init() size_max\n * parameter.\n */\n#define AV_BPRINT_SIZE_UNLIMITED  ((unsigned)-1)\n#define AV_BPRINT_SIZE_AUTOMATIC  1\n#define AV_BPRINT_SIZE_COUNT_ONLY 0\n\n/**\n * Init a print buffer.\n *\n * @param buf        buffer to init\n * @param size_init  initial size (including the final 0)\n * @param size_max   maximum size;\n *                   0 means do not write anything, just count the length;\n *                   1 is replaced by the maximum value for automatic storage;\n *                   any large value means that the internal buffer will be\n *                   reallocated as needed up to that limit; -1 is converted to\n *                   UINT_MAX, the largest limit possible.\n *                   Check also AV_BPRINT_SIZE_* macros.\n */\nvoid av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);\n\n/**\n * Init a print buffer using a pre-existing buffer.\n *\n * The buffer will not be reallocated.\n *\n * @param buf     buffer structure to init\n * @param buffer  byte buffer to use for the string data\n * @param size    size of buffer\n */\nvoid av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size);\n\n/**\n * Append a formatted string to a print buffer.\n */\nvoid av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3);\n\n/**\n * Append a formatted string to a print buffer.\n */\nvoid av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg);\n\n/**\n * Append char c n times to a print buffer.\n */\nvoid av_bprint_chars(AVBPrint *buf, char c, unsigned n);\n\n/**\n * Append data to a print buffer.\n *\n * param buf  bprint buffer to use\n * param data pointer to data\n * param size size of data\n */\nvoid av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size);\n\nstruct tm;\n/**\n * Append a formatted date and time to a print buffer.\n *\n * param buf  bprint buffer to use\n * param fmt  date and time format string, see strftime()\n * param tm   broken-down time structure to translate\n *\n * @note due to poor design of the standard strftime function, it may\n * produce poor results if the format string expands to a very long text and\n * the bprint buffer is near the limit stated by the size_max option.\n */\nvoid av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm);\n\n/**\n * Allocate bytes in the buffer for external use.\n *\n * @param[in]  buf          buffer structure\n * @param[in]  size         required size\n * @param[out] mem          pointer to the memory area\n * @param[out] actual_size  size of the memory area after allocation;\n *                          can be larger or smaller than size\n */\nvoid av_bprint_get_buffer(AVBPrint *buf, unsigned size,\n                          unsigned char **mem, unsigned *actual_size);\n\n/**\n * Reset the string to \"\" but keep internal allocated data.\n */\nvoid av_bprint_clear(AVBPrint *buf);\n\n/**\n * Test if the print buffer is complete (not truncated).\n *\n * It may have been truncated due to a memory allocation failure\n * or the size_max limit 
(compare size and size_max if necessary).\n */\nstatic inline int av_bprint_is_complete(AVBPrint *buf)\n{\n    return buf->len < buf->size;\n}\n\n/**\n * Finalize a print buffer.\n *\n * The print buffer can no longer be used afterwards,\n * but the len and size fields are still valid.\n *\n * @arg[out] ret_str  if not NULL, used to return a permanent copy of the\n *                    buffer contents, or NULL if memory allocation fails;\n *                    if NULL, the buffer is discarded and freed\n * @return  0 for success or error code (probably AVERROR(ENOMEM))\n */\nint av_bprint_finalize(AVBPrint *buf, char **ret_str);\n\n/**\n * Escape the content in src and append it to dstbuf.\n *\n * @param dstbuf        already inited destination bprint buffer\n * @param src           string containing the text to escape\n * @param special_chars string containing the special characters which\n *                      need to be escaped, can be NULL\n * @param mode          escape mode to employ, see AV_ESCAPE_MODE_* macros.\n *                      Any unknown value for mode will be considered equivalent to\n *                      AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without\n *                      notice.\n * @param flags         flags which control how to escape, see AV_ESCAPE_FLAG_* macros\n */\nvoid av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,\n                      enum AVEscapeMode mode, int flags);\n\n#endif /* AVUTIL_BPRINT_H */\n"
  },
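  {
    "path": "examples/bprint_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * building a string with the AVBPrint API from bprint.h above. Appends are\n * not checked individually; av_bprint_is_complete() reports truncation once\n * at the end, and av_bprint_finalize() hands back a heap copy.\n */\n#include <stdio.h>\n#include <libavutil/bprint.h>\n#include <libavutil/mem.h>\n\nint main(void)\n{\n    AVBPrint bp;\n    char    *result = NULL;\n\n    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);   /* grow as needed */\n    av_bprintf(&bp, \"fps=%d\", 30);\n    av_bprint_chars(&bp, '!', 3);\n\n    if (!av_bprint_is_complete(&bp)) {   /* only on allocation failure */\n        av_bprint_finalize(&bp, NULL);   /* discard the buffer */\n        return 1;\n    }\n    if (av_bprint_finalize(&bp, &result) < 0 || !result)\n        return 1;\n    printf(\"%s (len=%u)\\n\", result, bp.len);   /* len stays valid afterwards */\n    av_free(result);\n    return 0;\n}\n"
  },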
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/bswap.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * byte swapping routines\n */\n\n#ifndef AVUTIL_BSWAP_H\n#define AVUTIL_BSWAP_H\n\n#include <stdint.h>\n#include \"libavutil/avconfig.h\"\n#include \"attributes.h\"\n\n#ifdef HAVE_AV_CONFIG_H\n\n#include \"config.h\"\n\n#if   ARCH_AARCH64\n#   include \"aarch64/bswap.h\"\n#elif ARCH_ARM\n#   include \"arm/bswap.h\"\n#elif ARCH_AVR32\n#   include \"avr32/bswap.h\"\n#elif ARCH_BFIN\n#   include \"bfin/bswap.h\"\n#elif ARCH_SH4\n#   include \"sh4/bswap.h\"\n#elif ARCH_X86\n#   include \"x86/bswap.h\"\n#endif\n\n#endif /* HAVE_AV_CONFIG_H */\n\n#define AV_BSWAP16C(x) (((x) << 8 & 0xff00)  | ((x) >> 8 & 0x00ff))\n#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))\n#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))\n\n#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)\n\n#ifndef av_bswap16\nstatic av_always_inline av_const uint16_t av_bswap16(uint16_t x)\n{\n    x= (x>>8) | (x<<8);\n    return x;\n}\n#endif\n\n#ifndef av_bswap32\nstatic av_always_inline av_const uint32_t av_bswap32(uint32_t x)\n{\n    return AV_BSWAP32C(x);\n}\n#endif\n\n#ifndef av_bswap64\nstatic inline uint64_t av_const av_bswap64(uint64_t x)\n{\n    return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);\n}\n#endif\n\n// be2ne ... big-endian to native-endian\n// le2ne ... little-endian to native-endian\n\n#if AV_HAVE_BIGENDIAN\n#define av_be2ne16(x) (x)\n#define av_be2ne32(x) (x)\n#define av_be2ne64(x) (x)\n#define av_le2ne16(x) av_bswap16(x)\n#define av_le2ne32(x) av_bswap32(x)\n#define av_le2ne64(x) av_bswap64(x)\n#define AV_BE2NEC(s, x) (x)\n#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)\n#else\n#define av_be2ne16(x) av_bswap16(x)\n#define av_be2ne32(x) av_bswap32(x)\n#define av_be2ne64(x) av_bswap64(x)\n#define av_le2ne16(x) (x)\n#define av_le2ne32(x) (x)\n#define av_le2ne64(x) (x)\n#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)\n#define AV_LE2NEC(s, x) (x)\n#endif\n\n#define AV_BE2NE16C(x) AV_BE2NEC(16, x)\n#define AV_BE2NE32C(x) AV_BE2NEC(32, x)\n#define AV_BE2NE64C(x) AV_BE2NEC(64, x)\n#define AV_LE2NE16C(x) AV_LE2NEC(16, x)\n#define AV_LE2NE32C(x) AV_LE2NEC(32, x)\n#define AV_LE2NE64C(x) AV_LE2NEC(64, x)\n\n#endif /* AVUTIL_BSWAP_H */\n"
  },
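  {
    "path": "examples/bswap_usage.c",
    "content": "/*\n * Editor's sketch (hypothetical file, not part of the original repository):\n * the byte-swap and endianness helpers from bswap.h above. The le2ne/be2ne\n * macros compile to either a swap or a no-op depending on AV_HAVE_BIGENDIAN\n * from avconfig.h.\n */\n#include <stdio.h>\n#include <libavutil/bswap.h>\n\nint main(void)\n{\n    uint32_t x = 0x11223344;\n\n    printf(\"bswap32(%#x) = %#x\\n\", x, av_bswap32(x));       /* 0x44332211 */\n    printf(\"bswap16(0x1122) = %#x\\n\", av_bswap16(0x1122));  /* 0x2211 */\n    printf(\"le2ne32(%#x) = %#x\\n\", x, av_le2ne32(x));       /* swaps only on big-endian hosts */\n    return 0;\n}\n"
  },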
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/buffer.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * @ingroup lavu_buffer\n * refcounted data buffer API\n */\n\n#ifndef AVUTIL_BUFFER_H\n#define AVUTIL_BUFFER_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_buffer AVBuffer\n * @ingroup lavu_data\n *\n * @{\n * AVBuffer is an API for reference-counted data buffers.\n *\n * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer\n * represents the data buffer itself; it is opaque and not meant to be accessed\n * by the caller directly, but only through AVBufferRef. However, the caller may\n * e.g. compare two AVBuffer pointers to check whether two different references\n * are describing the same data buffer. AVBufferRef represents a single\n * reference to an AVBuffer and it is the object that may be manipulated by the\n * caller directly.\n *\n * There are two functions provided for creating a new AVBuffer with a single\n * reference -- av_buffer_alloc() to just allocate a new buffer, and\n * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing\n * reference, additional references may be created with av_buffer_ref().\n * Use av_buffer_unref() to free a reference (this will automatically free the\n * data once all the references are freed).\n *\n * The convention throughout this API and the rest of FFmpeg is such that the\n * buffer is considered writable if there exists only one reference to it (and\n * it has not been marked as read-only). The av_buffer_is_writable() function is\n * provided to check whether this is true and av_buffer_make_writable() will\n * automatically create a new writable buffer when necessary.\n * Of course nothing prevents the calling code from violating this convention,\n * however that is safe only when all the existing references are under its\n * control.\n *\n * @note Referencing and unreferencing the buffers is thread-safe and thus\n * may be done from multiple threads simultaneously without any need for\n * additional locking.\n *\n * @note Two different references to the same buffer can point to different\n * parts of the buffer (i.e. their AVBufferRef.data will not be equal).\n */\n\n/**\n * A reference counted buffer type. It is opaque and is meant to be used through\n * references (AVBufferRef).\n */\ntypedef struct AVBuffer AVBuffer;\n\n/**\n * A reference to a data buffer.\n *\n * The size of this struct is not a part of the public ABI and it is not meant\n * to be allocated directly.\n */\ntypedef struct AVBufferRef {\n    AVBuffer *buffer;\n\n    /**\n     * The data buffer. 
It is considered writable if and only if\n     * this is the only reference to the buffer, in which case\n     * av_buffer_is_writable() returns 1.\n     */\n    uint8_t *data;\n    /**\n     * Size of data in bytes.\n     */\n    int      size;\n} AVBufferRef;\n\n/**\n * Allocate an AVBuffer of the given size using av_malloc().\n *\n * @return an AVBufferRef of given size or NULL when out of memory\n */\nAVBufferRef *av_buffer_alloc(int size);\n\n/**\n * Same as av_buffer_alloc(), except the returned buffer will be initialized\n * to zero.\n */\nAVBufferRef *av_buffer_allocz(int size);\n\n/**\n * Always treat the buffer as read-only, even when it has only one\n * reference.\n */\n#define AV_BUFFER_FLAG_READONLY (1 << 0)\n\n/**\n * Create an AVBuffer from an existing array.\n *\n * If this function is successful, data is owned by the AVBuffer. The caller may\n * only access data through the returned AVBufferRef and references derived from\n * it.\n * If this function fails, data is left untouched.\n * @param data   data array\n * @param size   size of data in bytes\n * @param free   a callback for freeing this buffer's data\n * @param opaque parameter to be got for processing or passed to free\n * @param flags  a combination of AV_BUFFER_FLAG_*\n *\n * @return an AVBufferRef referring to data on success, NULL on failure.\n */\nAVBufferRef *av_buffer_create(uint8_t *data, int size,\n                              void (*free)(void *opaque, uint8_t *data),\n                              void *opaque, int flags);\n\n/**\n * Default free callback, which calls av_free() on the buffer data.\n * This function is meant to be passed to av_buffer_create(), not called\n * directly.\n */\nvoid av_buffer_default_free(void *opaque, uint8_t *data);\n\n/**\n * Create a new reference to an AVBuffer.\n *\n * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on\n * failure.\n */\nAVBufferRef *av_buffer_ref(AVBufferRef *buf);\n\n/**\n * Free a given reference and automatically free the buffer if there are no more\n * references to it.\n *\n * @param buf the reference to be freed. The pointer is set to NULL on return.\n */\nvoid av_buffer_unref(AVBufferRef **buf);\n\n/**\n * @return 1 if the caller may write to the data referred to by buf (which is\n * true if and only if buf is the only reference to the underlying AVBuffer).\n * Return 0 otherwise.\n * A positive answer is valid until av_buffer_ref() is called on buf.\n */\nint av_buffer_is_writable(const AVBufferRef *buf);\n\n/**\n * @return the opaque parameter set by av_buffer_create.\n */\nvoid *av_buffer_get_opaque(const AVBufferRef *buf);\n\nint av_buffer_get_ref_count(const AVBufferRef *buf);\n\n/**\n * Create a writable reference from a given buffer reference, avoiding data copy\n * if possible.\n *\n * @param buf buffer reference to make writable. On success, buf is either left\n *            untouched, or it is unreferenced and a new writable AVBufferRef is\n *            written in its place. On failure, buf is left untouched.\n * @return 0 on success, a negative AVERROR on failure.\n */\nint av_buffer_make_writable(AVBufferRef **buf);\n\n/**\n * Reallocate a given buffer.\n *\n * @param buf  a buffer reference to reallocate. On success, buf will be\n *             unreferenced and a new reference with the required size will be\n *             written in its place. On failure buf will be left untouched. 
*buf\n *             may be NULL, then a new buffer is allocated.\n * @param size required new buffer size.\n * @return 0 on success, a negative AVERROR on failure.\n *\n * @note the buffer is actually reallocated with av_realloc() only if it was\n * initially allocated through av_buffer_realloc(NULL) and there is only one\n * reference to it (i.e. the one passed to this function). In all other cases\n * a new buffer is allocated and the data is copied.\n */\nint av_buffer_realloc(AVBufferRef **buf, int size);\n\n/**\n * @}\n */\n\n/**\n * @defgroup lavu_bufferpool AVBufferPool\n * @ingroup lavu_data\n *\n * @{\n * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.\n *\n * Frequently allocating and freeing large buffers may be slow. AVBufferPool is\n * meant to solve this in cases when the caller needs a set of buffers of the\n * same size (the most obvious use case being buffers for raw video or audio\n * frames).\n *\n * At the beginning, the user must call av_buffer_pool_init() to create the\n * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to\n * get a reference to a new buffer, similar to av_buffer_alloc(). This new\n * reference works in all aspects the same way as the one created by\n * av_buffer_alloc(). However, when the last reference to this buffer is\n * unreferenced, it is returned to the pool instead of being freed and will be\n * reused for subsequent av_buffer_pool_get() calls.\n *\n * When the caller is done with the pool and no longer needs to allocate any new\n * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.\n * Once all the buffers are released, it will automatically be freed.\n *\n * Allocating and releasing buffers with this API is thread-safe as long as\n * either the default alloc callback is used, or the user-supplied one is\n * thread-safe.\n */\n\n/**\n * The buffer pool. This structure is opaque and not meant to be accessed\n * directly. It is allocated with av_buffer_pool_init() and freed with\n * av_buffer_pool_uninit().\n */\ntypedef struct AVBufferPool AVBufferPool;\n\n/**\n * Allocate and initialize a buffer pool.\n *\n * @param size size of each buffer in this pool\n * @param alloc a function that will be used to allocate new buffers when the\n * pool is empty. May be NULL, then the default allocator will be used\n * (av_buffer_alloc()).\n * @return newly created buffer pool on success, NULL on error.\n */\nAVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));\n\n/**\n * Mark the pool as being available for freeing. It will actually be freed only\n * once all the allocated buffers associated with the pool are released. Thus it\n * is safe to call this function while some of the allocated buffers are still\n * in use.\n *\n * @param pool pointer to the pool to be freed. It will be set to NULL.\n * @see av_buffer_pool_can_uninit()\n */\nvoid av_buffer_pool_uninit(AVBufferPool **pool);\n\n/**\n * Allocate a new AVBuffer, reusing an old buffer from the pool when available.\n * This function may be called simultaneously from multiple threads.\n *\n * @return a reference to the new buffer on success, NULL on error.\n */\nAVBufferRef *av_buffer_pool_get(AVBufferPool *pool);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_BUFFER_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/channel_layout.h",
    "content": "/*\n * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n * Copyright (c) 2008 Peter Ross\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CHANNEL_LAYOUT_H\n#define AVUTIL_CHANNEL_LAYOUT_H\n\n#include <stdint.h>\n\n/**\n * @file\n * audio channel layout utility functions\n */\n\n/**\n * @addtogroup lavu_audio\n * @{\n */\n\n/**\n * @defgroup channel_masks Audio channel masks\n *\n * A channel layout is a 64-bits integer with a bit set for every channel.\n * The number of bits set must be equal to the number of channels.\n * The value 0 means that the channel layout is not known.\n * @note this data structure is not powerful enough to handle channels\n * combinations that have the same channel multiple times, such as\n * dual-mono.\n *\n * @{\n */\n#define AV_CH_FRONT_LEFT             0x00000001\n#define AV_CH_FRONT_RIGHT            0x00000002\n#define AV_CH_FRONT_CENTER           0x00000004\n#define AV_CH_LOW_FREQUENCY          0x00000008\n#define AV_CH_BACK_LEFT              0x00000010\n#define AV_CH_BACK_RIGHT             0x00000020\n#define AV_CH_FRONT_LEFT_OF_CENTER   0x00000040\n#define AV_CH_FRONT_RIGHT_OF_CENTER  0x00000080\n#define AV_CH_BACK_CENTER            0x00000100\n#define AV_CH_SIDE_LEFT              0x00000200\n#define AV_CH_SIDE_RIGHT             0x00000400\n#define AV_CH_TOP_CENTER             0x00000800\n#define AV_CH_TOP_FRONT_LEFT         0x00001000\n#define AV_CH_TOP_FRONT_CENTER       0x00002000\n#define AV_CH_TOP_FRONT_RIGHT        0x00004000\n#define AV_CH_TOP_BACK_LEFT          0x00008000\n#define AV_CH_TOP_BACK_CENTER        0x00010000\n#define AV_CH_TOP_BACK_RIGHT         0x00020000\n#define AV_CH_STEREO_LEFT            0x20000000  ///< Stereo downmix.\n#define AV_CH_STEREO_RIGHT           0x40000000  ///< See AV_CH_STEREO_LEFT.\n#define AV_CH_WIDE_LEFT              0x0000000080000000ULL\n#define AV_CH_WIDE_RIGHT             0x0000000100000000ULL\n#define AV_CH_SURROUND_DIRECT_LEFT   0x0000000200000000ULL\n#define AV_CH_SURROUND_DIRECT_RIGHT  0x0000000400000000ULL\n#define AV_CH_LOW_FREQUENCY_2        0x0000000800000000ULL\n\n/** Channel mask value used for AVCodecContext.request_channel_layout\n    to indicate that the user requests the channel order of the decoder output\n    to be the native codec channel order. 
*/\n#define AV_CH_LAYOUT_NATIVE          0x8000000000000000ULL\n\n/**\n * @}\n * @defgroup channel_mask_c Audio channel convenience macros\n * @{\n * */\n#define AV_CH_LAYOUT_MONO              (AV_CH_FRONT_CENTER)\n#define AV_CH_LAYOUT_STEREO            (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)\n#define AV_CH_LAYOUT_2POINT1           (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_2_1               (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_SURROUND          (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)\n#define AV_CH_LAYOUT_3POINT1           (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_4POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_4POINT1           (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_2_2               (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)\n#define AV_CH_LAYOUT_QUAD              (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_5POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)\n#define AV_CH_LAYOUT_5POINT1           (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_5POINT0_BACK      (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_5POINT1_BACK      (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_6POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT0_FRONT     (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_HEXAGONAL         (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT1_BACK      (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT1_FRONT     (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_7POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_7POINT0_FRONT     (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_7POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_7POINT1_WIDE      (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_OCTAGONAL         (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_STEREO_DOWNMIX    (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)\n\nenum AVMatrixEncoding {\n    AV_MATRIX_ENCODING_NONE,\n    AV_MATRIX_ENCODING_DOLBY,\n    AV_MATRIX_ENCODING_DPLII,\n    AV_MATRIX_ENCODING_DPLIIX,\n    AV_MATRIX_ENCODING_DPLIIZ,\n    AV_MATRIX_ENCODING_DOLBYEX,\n    AV_MATRIX_ENCODING_DOLBYHEADPHONE,\n    AV_MATRIX_ENCODING_NB\n};\n\n/**\n * @}\n */\n\n/**\n * Return a channel layout id that matches name, or 0 if no match is found.\n *\n * name can be one or several of the following notations,\n * separated by '+' or '|':\n * - the name of a usual channel layout (mono, stereo, 4.0, quad, 5.0,\n *   5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);\n * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,\n *   SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);\n * - a number of channels, in decimal, optionally followed by 'c', yielding\n *   the default channel layout for that number of channels (@see\n *   av_get_default_channel_layout);\n * - a 
channel layout mask, in hexadecimal starting with \"0x\" (see the\n *   AV_CH_* macros).\n *\n * @warning Starting from the next major bump the trailing character\n * 'c' to specify a number of channels will be required, while a\n * channel layout mask could also be specified as a decimal number\n * (if and only if not followed by \"c\").\n *\n * Example: \"stereo+FC\" = \"2c+FC\" = \"2c+1c\" = \"0x7\"\n */\nuint64_t av_get_channel_layout(const char *name);\n\n/**\n * Return a description of a channel layout.\n * If nb_channels is <= 0, it is guessed from the channel_layout.\n *\n * @param buf put here the string containing the channel layout\n * @param buf_size size in bytes of the buffer\n */\nvoid av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);\n\nstruct AVBPrint;\n/**\n * Append a description of a channel layout to a bprint buffer.\n */\nvoid av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);\n\n/**\n * Return the number of channels in the channel layout.\n */\nint av_get_channel_layout_nb_channels(uint64_t channel_layout);\n\n/**\n * Return default channel layout for a given number of channels.\n */\nint64_t av_get_default_channel_layout(int nb_channels);\n\n/**\n * Get the index of a channel in channel_layout.\n *\n * @param channel a channel layout describing exactly one channel which must be\n *                present in channel_layout.\n *\n * @return index of channel in channel_layout on success, a negative AVERROR\n *         on error.\n */\nint av_get_channel_layout_channel_index(uint64_t channel_layout,\n                                        uint64_t channel);\n\n/**\n * Get the channel with the given index in channel_layout.\n */\nuint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);\n\n/**\n * Get the name of a given channel.\n *\n * @return channel name on success, NULL on error.\n */\nconst char *av_get_channel_name(uint64_t channel);\n\n/**\n * Get the description of a given channel.\n *\n * @param channel  a channel layout with a single channel\n * @return  channel description on success, NULL on error\n */\nconst char *av_get_channel_description(uint64_t channel);\n\n/**\n * Get the value and name of a standard channel layout.\n *\n * @param[in]  index   index in an internal list, starting at 0\n * @param[out] layout  channel layout mask\n * @param[out] name    name of the layout\n * @return  0  if the layout exists,\n *          <0 if index is beyond the limits\n */\nint av_get_standard_channel_layout(unsigned index, uint64_t *layout,\n                                   const char **name);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_CHANNEL_LAYOUT_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/common.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * common internal and external API header\n */\n\n#ifndef AVUTIL_COMMON_H\n#define AVUTIL_COMMON_H\n\n#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)\n#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS\n#endif\n\n#include <errno.h>\n#include <inttypes.h>\n#include <limits.h>\n#include <math.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n#include \"libavutil/avconfig.h\"\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_NE(be, le) (be)\n#else\n#   define AV_NE(be, le) (le)\n#endif\n\n//rounded division & shift\n#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))\n/* assume b>0 */\n#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))\n/* assume a>0 and b>0 */\n#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \\\n                                                       : ((a) + (1<<(b)) - 1) >> (b))\n#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))\n#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))\n#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))\n#define FFSIGN(a) ((a) > 0 ? 1 : -1)\n\n#define FFMAX(a,b) ((a) > (b) ? (a) : (b))\n#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)\n#define FFMIN(a,b) ((a) > (b) ? (b) : (a))\n#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)\n\n#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)\n#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))\n#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))\n\n/* misc math functions */\n\n/**\n * Reverse the order of the bits of an 8-bits unsigned integer.\n */\n#if FF_API_AV_REVERSE\nextern attribute_deprecated const uint8_t av_reverse[256];\n#endif\n\n#ifdef HAVE_AV_CONFIG_H\n#   include \"config.h\"\n#   include \"intmath.h\"\n#endif\n\n/* Pull in unguarded fallback defines at the end of this file. 
*/\n#include \"common.h\"\n\n#ifndef av_log2\nav_const int av_log2(unsigned v);\n#endif\n\n#ifndef av_log2_16bit\nav_const int av_log2_16bit(unsigned v);\n#endif\n\n/**\n * Clip a signed integer value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const int av_clip_c(int a, int amin, int amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return amax;\n    else               return a;\n}\n\n/**\n * Clip a signed 64bit integer value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return amax;\n    else               return a;\n}\n\n/**\n * Clip a signed integer value into the 0-255 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const uint8_t av_clip_uint8_c(int a)\n{\n    if (a&(~0xFF)) return (-a)>>31;\n    else           return a;\n}\n\n/**\n * Clip a signed integer value into the -128,127 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const int8_t av_clip_int8_c(int a)\n{\n    if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F;\n    else                  return a;\n}\n\n/**\n * Clip a signed integer value into the 0-65535 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const uint16_t av_clip_uint16_c(int a)\n{\n    if (a&(~0xFFFF)) return (-a)>>31;\n    else             return a;\n}\n\n/**\n * Clip a signed integer value into the -32768,32767 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const int16_t av_clip_int16_c(int a)\n{\n    if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF;\n    else                      return a;\n}\n\n/**\n * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)\n{\n    if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);\n    else                                         return (int32_t)a;\n}\n\n/**\n * Clip a signed integer to an unsigned power of two range.\n * @param  a value to clip\n * @param  p bit position to clip at\n * @return clipped value\n */\nstatic av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)\n{\n    if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);\n    else                   return  a;\n}\n\n/**\n * Add two signed 32-bit values with saturation.\n *\n * @param  a one value\n * @param  b another value\n * @return sum with signed saturation\n */\nstatic av_always_inline int av_sat_add32_c(int a, int b)\n{\n    return av_clipl_int32((int64_t)a + b);\n}\n\n/**\n * Add a doubled value to another value with saturation at both stages.\n *\n * @param  a first value\n * @param  b value doubled and added to a\n * @return sum with signed saturation\n */\nstatic av_always_inline int 
av_sat_dadd32_c(int a, int b)\n{\n    return av_sat_add32(a, av_sat_add32(b, b));\n}\n\n/**\n * Clip a float value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const float av_clipf_c(float a, float amin, float amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return amax;\n    else               return a;\n}\n\n/**\n * Clip a double value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const double av_clipd_c(double a, double amin, double amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return amax;\n    else               return a;\n}\n\n/** Compute ceil(log2(x)).\n * @param x value used to compute ceil(log2(x))\n * @return computed ceiling of log2(x)\n */\nstatic av_always_inline av_const int av_ceil_log2_c(int x)\n{\n    return av_log2((x - 1) << 1);\n}\n\n/**\n * Count number of bits set to one in x\n * @param x value to count bits of\n * @return the number of bits set to one in x\n */\nstatic av_always_inline av_const int av_popcount_c(uint32_t x)\n{\n    x -= (x >> 1) & 0x55555555;\n    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);\n    x = (x + (x >> 4)) & 0x0F0F0F0F;\n    x += x >> 8;\n    return (x + (x >> 16)) & 0x3F;\n}\n\n/**\n * Count number of bits set to one in x\n * @param x value to count bits of\n * @return the number of bits set to one in x\n */\nstatic av_always_inline av_const int av_popcount64_c(uint64_t x)\n{\n    return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));\n}\n\n#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))\n#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))\n\n/**\n * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.\n *\n * @param val      Output value, must be an lvalue of type uint32_t.\n * @param GET_BYTE Expression reading one byte from the input.\n *                 Evaluated up to 7 times (4 for the currently\n *                 assigned Unicode range).  With a memory buffer\n *                 input, this could be *ptr++.\n * @param ERROR    Expression to be evaluated on invalid input,\n *                 typically a goto statement.\n *\n * @warning ERROR should not contain a loop control statement which\n * could interact with the internal while loop, and should force an\n * exit from the macro code (e.g. 
through a goto or a return) in order\n * to prevent undefined results.\n */\n#define GET_UTF8(val, GET_BYTE, ERROR)\\\n    val= GET_BYTE;\\\n    {\\\n        uint32_t top = (val & 128) >> 1;\\\n        if ((val & 0xc0) == 0x80 || val >= 0xFE)\\\n            ERROR\\\n        while (val & top) {\\\n            int tmp= GET_BYTE - 128;\\\n            if(tmp>>6)\\\n                ERROR\\\n            val= (val<<6) + tmp;\\\n            top <<= 5;\\\n        }\\\n        val &= (top << 1) - 1;\\\n    }\n\n/**\n * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.\n *\n * @param val       Output value, must be an lvalue of type uint32_t.\n * @param GET_16BIT Expression returning two bytes of UTF-16 data converted\n *                  to native byte order.  Evaluated one or two times.\n * @param ERROR     Expression to be evaluated on invalid input,\n *                  typically a goto statement.\n */\n#define GET_UTF16(val, GET_16BIT, ERROR)\\\n    val = GET_16BIT;\\\n    {\\\n        unsigned int hi = val - 0xD800;\\\n        if (hi < 0x800) {\\\n            val = GET_16BIT - 0xDC00;\\\n            if (val > 0x3FFU || hi > 0x3FFU)\\\n                ERROR\\\n            val += (hi<<10) + 0x10000;\\\n        }\\\n    }\\\n\n/**\n * @def PUT_UTF8(val, tmp, PUT_BYTE)\n * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).\n * @param val is an input-only argument and should be of type uint32_t. It holds\n * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If\n * val is given as a function it is executed only once.\n * @param tmp is a temporary variable and should be of type uint8_t. It\n * represents an intermediate value during conversion that is to be\n * output by PUT_BYTE.\n * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.\n * It could be a function or a statement, and uses tmp as the input byte.\n * For example, PUT_BYTE could be \"*output++ = tmp;\" PUT_BYTE will be\n * executed up to 4 times for values in the valid UTF-8 range and up to\n * 7 times in the general case, depending on the length of the converted\n * Unicode character.\n */\n#define PUT_UTF8(val, tmp, PUT_BYTE)\\\n    {\\\n        int bytes, shift;\\\n        uint32_t in = val;\\\n        if (in < 0x80) {\\\n            tmp = in;\\\n            PUT_BYTE\\\n        } else {\\\n            bytes = (av_log2(in) + 4) / 5;\\\n            shift = (bytes - 1) * 6;\\\n            tmp = (256 - (256 >> bytes)) | (in >> shift);\\\n            PUT_BYTE\\\n            while (shift >= 6) {\\\n                shift -= 6;\\\n                tmp = 0x80 | ((in >> shift) & 0x3f);\\\n                PUT_BYTE\\\n            }\\\n        }\\\n    }\n\n/**\n * @def PUT_UTF16(val, tmp, PUT_16BIT)\n * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).\n * @param val is an input-only argument and should be of type uint32_t. It holds\n * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If\n * val is given as a function it is executed only once.\n * @param tmp is a temporary variable and should be of type uint16_t. It\n * represents an intermediate value during conversion that is to be\n * output by PUT_16BIT.\n * @param PUT_16BIT writes the converted UTF-16 data to any proper destination\n * in desired endianness. It could be a function or a statement, and uses tmp\n * as the input byte.  
For example, PUT_16BIT could be \"*output++ = tmp;\"\n * PUT_16BIT will be executed 1 or 2 times depending on input character.\n */\n#define PUT_UTF16(val, tmp, PUT_16BIT)\\\n    {\\\n        uint32_t in = val;\\\n        if (in < 0x10000) {\\\n            tmp = in;\\\n            PUT_16BIT\\\n        } else {\\\n            tmp = 0xD800 | ((in - 0x10000) >> 10);\\\n            PUT_16BIT\\\n            tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\\\n            PUT_16BIT\\\n        }\\\n    }\\\n\n\n\n#include \"mem.h\"\n\n#ifdef HAVE_AV_CONFIG_H\n#    include \"internal.h\"\n#endif /* HAVE_AV_CONFIG_H */\n\n#endif /* AVUTIL_COMMON_H */\n\n/*\n * The following definitions are outside the multiple inclusion guard\n * to ensure they are immediately available in intmath.h.\n */\n\n#ifndef av_ceil_log2\n#   define av_ceil_log2     av_ceil_log2_c\n#endif\n#ifndef av_clip\n#   define av_clip          av_clip_c\n#endif\n#ifndef av_clip64\n#   define av_clip64        av_clip64_c\n#endif\n#ifndef av_clip_uint8\n#   define av_clip_uint8    av_clip_uint8_c\n#endif\n#ifndef av_clip_int8\n#   define av_clip_int8     av_clip_int8_c\n#endif\n#ifndef av_clip_uint16\n#   define av_clip_uint16   av_clip_uint16_c\n#endif\n#ifndef av_clip_int16\n#   define av_clip_int16    av_clip_int16_c\n#endif\n#ifndef av_clipl_int32\n#   define av_clipl_int32   av_clipl_int32_c\n#endif\n#ifndef av_clip_uintp2\n#   define av_clip_uintp2   av_clip_uintp2_c\n#endif\n#ifndef av_sat_add32\n#   define av_sat_add32     av_sat_add32_c\n#endif\n#ifndef av_sat_dadd32\n#   define av_sat_dadd32    av_sat_dadd32_c\n#endif\n#ifndef av_clipf\n#   define av_clipf         av_clipf_c\n#endif\n#ifndef av_clipd\n#   define av_clipd         av_clipd_c\n#endif\n#ifndef av_popcount\n#   define av_popcount      av_popcount_c\n#endif\n#ifndef av_popcount64\n#   define av_popcount64    av_popcount64_c\n#endif\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/cpu.h",
    "content": "/*\n * Copyright (c) 2000, 2001, 2002 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CPU_H\n#define AVUTIL_CPU_H\n\n#include \"attributes.h\"\n\n#define AV_CPU_FLAG_FORCE    0x80000000 /* force usage of selected flags (OR) */\n\n    /* lower 16 bits - CPU features */\n#define AV_CPU_FLAG_MMX          0x0001 ///< standard MMX\n#define AV_CPU_FLAG_MMXEXT       0x0002 ///< SSE integer functions or AMD MMX ext\n#define AV_CPU_FLAG_MMX2         0x0002 ///< SSE integer functions or AMD MMX ext\n#define AV_CPU_FLAG_3DNOW        0x0004 ///< AMD 3DNOW\n#define AV_CPU_FLAG_SSE          0x0008 ///< SSE functions\n#define AV_CPU_FLAG_SSE2         0x0010 ///< PIV SSE2 functions\n#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster\n                                        ///< than regular MMX/SSE (e.g. Core1)\n#define AV_CPU_FLAG_3DNOWEXT     0x0020 ///< AMD 3DNowExt\n#define AV_CPU_FLAG_SSE3         0x0040 ///< Prescott SSE3 functions\n#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster\n                                        ///< than regular MMX/SSE (e.g. Core1)\n#define AV_CPU_FLAG_SSSE3        0x0080 ///< Conroe SSSE3 functions\n#define AV_CPU_FLAG_ATOM     0x10000000 ///< Atom processor, some SSSE3 instructions are slower\n#define AV_CPU_FLAG_SSE4         0x0100 ///< Penryn SSE4.1 functions\n#define AV_CPU_FLAG_SSE42        0x0200 ///< Nehalem SSE4.2 functions\n#define AV_CPU_FLAG_AVX          0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used\n#define AV_CPU_FLAG_XOP          0x0400 ///< Bulldozer XOP functions\n#define AV_CPU_FLAG_FMA4         0x0800 ///< Bulldozer FMA4 functions\n// #if LIBAVUTIL_VERSION_MAJOR <52\n#define AV_CPU_FLAG_CMOV      0x1001000 ///< supports cmov instruction\n// #else\n// #define AV_CPU_FLAG_CMOV         0x1000 ///< supports cmov instruction\n// #endif\n#define AV_CPU_FLAG_AVX2         0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used\n#define AV_CPU_FLAG_FMA3        0x10000 ///< Haswell FMA3 functions\n#define AV_CPU_FLAG_BMI1        0x20000 ///< Bit Manipulation Instruction Set 1\n#define AV_CPU_FLAG_BMI2        0x40000 ///< Bit Manipulation Instruction Set 2\n\n#define AV_CPU_FLAG_ALTIVEC      0x0001 ///< standard\n\n#define AV_CPU_FLAG_ARMV5TE      (1 << 0)\n#define AV_CPU_FLAG_ARMV6        (1 << 1)\n#define AV_CPU_FLAG_ARMV6T2      (1 << 2)\n#define AV_CPU_FLAG_VFP          (1 << 3)\n#define AV_CPU_FLAG_VFPV3        (1 << 4)\n#define AV_CPU_FLAG_NEON         (1 << 5)\n\n/**\n * Return the flags which specify extensions supported by the CPU.\n * The returned value is affected by av_force_cpu_flags() if that was used\n * before. 
So av_get_cpu_flags() can easily be used in an application to\n * detect the enabled cpu flags.\n */\nint av_get_cpu_flags(void);\n\n/**\n * Disables cpu detection and forces the specified flags.\n * -1 is a special case that disables forcing of specific flags.\n */\nvoid av_force_cpu_flags(int flags);\n\n/**\n * Set a mask on flags returned by av_get_cpu_flags().\n * This function is mainly useful for testing.\n * Please use av_force_cpu_flags() and av_get_cpu_flags() instead, which are more flexible.\n *\n * @warning this function is not thread safe.\n */\nattribute_deprecated void av_set_cpu_flags_mask(int mask);\n\n/**\n * Parse CPU flags from a string.\n *\n * The returned flags contain the specified flags as well as related unspecified flags.\n *\n * This function exists only for compatibility with libav.\n * Please use av_parse_cpu_caps() when possible.\n * @return a combination of AV_CPU_* flags, negative on error.\n */\nattribute_deprecated\nint av_parse_cpu_flags(const char *s);\n\n/**\n * Parse CPU caps from a string and update the given AV_CPU_* flags based on that.\n *\n * @return negative on error.\n */\nint av_parse_cpu_caps(unsigned *flags, const char *s);\n\n/**\n * @return the number of logical CPU cores present.\n */\nint av_cpu_count(void);\n\n#endif /* AVUTIL_CPU_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/crc.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CRC_H\n#define AVUTIL_CRC_H\n\n#include <stdint.h>\n#include <stddef.h>\n#include \"attributes.h\"\n\n/**\n * @defgroup lavu_crc32 CRC32\n * @ingroup lavu_crypto\n * @{\n */\n\ntypedef uint32_t AVCRC;\n\ntypedef enum {\n    AV_CRC_8_ATM,\n    AV_CRC_16_ANSI,\n    AV_CRC_16_CCITT,\n    AV_CRC_32_IEEE,\n    AV_CRC_32_IEEE_LE,  /*< reversed bitorder version of AV_CRC_32_IEEE */\n    AV_CRC_24_IEEE = 12,\n    AV_CRC_MAX,         /*< Not part of public API! Do not use outside libavutil. */\n}AVCRCId;\n\n/**\n * Initialize a CRC table.\n * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024\n * @param le If 1, the lowest bit represents the coefficient for the highest\n *           exponent of the corresponding polynomial (both for poly and\n *           actual CRC).\n *           If 0, you must swap the CRC parameter and the result of av_crc\n *           if you need the standard representation (can be simplified in\n *           most cases to e.g. bswap16):\n *           av_bswap32(crc << (32-bits))\n * @param bits number of bits for the CRC\n * @param poly generator polynomial without the x**bits coefficient, in the\n *             representation as specified by le\n * @param ctx_size size of ctx in bytes\n * @return <0 on failure\n */\nint av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);\n\n/**\n * Get an initialized standard CRC table.\n * @param crc_id ID of a standard CRC\n * @return a pointer to the CRC table or NULL on failure\n */\nconst AVCRC *av_crc_get_table(AVCRCId crc_id);\n\n/**\n * Calculate the CRC of a block.\n * @param crc CRC of previous blocks if any or initial value for CRC\n * @return CRC updated with the data from the given block\n *\n * @see av_crc_init() \"le\" parameter\n */\nuint32_t av_crc(const AVCRC *ctx, uint32_t crc,\n                const uint8_t *buffer, size_t length) av_pure;\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_CRC_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/dict.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Public dictionary API.\n * @deprecated\n *  AVDictionary is provided for compatibility with libav. It is both in\n *  implementation as well as API inefficient. It does not scale and is\n *  extremely slow with large dictionaries.\n *  It is recommended that new code uses our tree container from tree.c/h\n *  where applicable, which uses AVL trees to achieve O(log n) performance.\n */\n\n#ifndef AVUTIL_DICT_H\n#define AVUTIL_DICT_H\n\n/**\n * @addtogroup lavu_dict AVDictionary\n * @ingroup lavu_data\n *\n * @brief Simple key:value store\n *\n * @{\n * Dictionaries are used for storing key:value pairs. To create\n * an AVDictionary, simply pass an address of a NULL pointer to\n * av_dict_set(). NULL can be used as an empty dictionary wherever\n * a pointer to an AVDictionary is required.\n * Use av_dict_get() to retrieve an entry or iterate over all\n * entries and finally av_dict_free() to free the dictionary\n * and all its contents.\n *\n @code\n   AVDictionary *d = NULL;           // \"create\" an empty dictionary\n   AVDictionaryEntry *t = NULL;\n\n   av_dict_set(&d, \"foo\", \"bar\", 0); // add an entry\n\n   char *k = av_strdup(\"key\");       // if your strings are already allocated,\n   char *v = av_strdup(\"value\");     // you can avoid copying them like this\n   av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);\n\n   while (t = av_dict_get(d, \"\", t, AV_DICT_IGNORE_SUFFIX)) {\n       <....>                             // iterate over all entries in d\n   }\n   av_dict_free(&d);\n @endcode\n *\n */\n\n#define AV_DICT_MATCH_CASE      1   /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */\n#define AV_DICT_IGNORE_SUFFIX   2   /**< Return first entry in a dictionary whose first part corresponds to the search key,\n                                         ignoring the suffix of the found key string. Only relevant in av_dict_get(). */\n#define AV_DICT_DONT_STRDUP_KEY 4   /**< Take ownership of a key that's been\n                                         allocated with av_malloc() or another memory allocation function. */\n#define AV_DICT_DONT_STRDUP_VAL 8   /**< Take ownership of a value that's been\n                                         allocated with av_malloc() or another memory allocation function. */\n#define AV_DICT_DONT_OVERWRITE 16   ///< Don't overwrite existing entries.\n#define AV_DICT_APPEND         32   /**< If the entry already exists, append to it.  Note that no\n                                      delimiter is added, the strings are simply concatenated. 
*/\n\ntypedef struct AVDictionaryEntry {\n    char *key;\n    char *value;\n} AVDictionaryEntry;\n\ntypedef struct AVDictionary AVDictionary;\n\n/**\n * Get a dictionary entry with matching key.\n *\n * The returned entry key or value must not be changed, or it will\n * cause undefined behavior.\n *\n * To iterate through all the dictionary entries, you can set the matching key\n * to the null string \"\" and set the AV_DICT_IGNORE_SUFFIX flag.\n *\n * @param prev Set to the previous matching element to find the next.\n *             If set to NULL the first matching element is returned.\n * @param key matching key\n * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved\n * @return found entry or NULL in case no matching entry was found in the dictionary\n */\nAVDictionaryEntry *\nav_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags);\n\n/**\n * Get number of entries in dictionary.\n *\n * @param m dictionary\n * @return  number of entries in dictionary\n */\nint av_dict_count(const AVDictionary *m);\n\n/**\n * Set the given entry in *pm, overwriting an existing entry.\n *\n * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL\n * a dictionary struct is allocated and put in *pm.\n * @param key entry key to add to *pm (will be av_strduped depending on flags)\n * @param value entry value to add to *pm (will be av_strduped depending on flags).\n *        Passing a NULL value will cause an existing entry to be deleted.\n * @return >= 0 on success otherwise an error code <0\n */\nint av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);\n\n/**\n * Parse the key/value pairs list and add the parsed entries to a dictionary.\n *\n * In case of failure, all the successfully set entries are stored in\n * *pm. You may need to manually free the created dictionary.\n *\n * @param key_val_sep  a 0-terminated list of characters used to separate\n *                     key from value\n * @param pairs_sep    a 0-terminated list of characters used to separate\n *                     two pairs from each other\n * @param flags        flags to use when adding to dictionary.\n *                     AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL\n *                     are ignored since the key/value tokens will always\n *                     be duplicated.\n * @return             0 on success, negative AVERROR code on failure\n */\nint av_dict_parse_string(AVDictionary **pm, const char *str,\n                         const char *key_val_sep, const char *pairs_sep,\n                         int flags);\n\n/**\n * Copy entries from one AVDictionary struct into another.\n * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,\n *            this function will allocate a struct for you and put it in *dst\n * @param src pointer to source AVDictionary struct\n * @param flags flags to use when setting entries in *dst\n * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag\n */\nvoid av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags);\n\n/**\n * Free all the memory allocated for an AVDictionary struct\n * and all keys and values.\n */\nvoid av_dict_free(AVDictionary **m);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_DICT_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/downmix_info.h",
    "content": "/*\n * Copyright (c) 2014 Tim Walker <tdskywalker@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_DOWNMIX_INFO_H\n#define AVUTIL_DOWNMIX_INFO_H\n\n#include \"frame.h\"\n\n/**\n * @file\n * audio downmix medatata\n */\n\n/**\n * @addtogroup lavu_audio\n * @{\n */\n\n/**\n * @defgroup downmix_info Audio downmix metadata\n * @{\n */\n\n/**\n * Possible downmix types.\n */\nenum AVDownmixType {\n    AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */\n    AV_DOWNMIX_TYPE_LORO,    /**< Lo/Ro 2-channel downmix (Stereo). */\n    AV_DOWNMIX_TYPE_LTRT,    /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */\n    AV_DOWNMIX_TYPE_DPLII,   /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */\n    AV_DOWNMIX_TYPE_NB       /**< Number of downmix types. Not part of ABI. */\n};\n\n/**\n * This structure describes optional metadata relevant to a downmix procedure.\n *\n * All fields are set by the decoder to the value indicated in the audio\n * bitstream (if present), or to a \"sane\" default otherwise.\n */\ntypedef struct AVDownmixInfo {\n    /**\n     * Type of downmix preferred by the mastering engineer.\n     */\n    enum AVDownmixType preferred_downmix_type;\n\n    /**\n     * Absolute scale factor representing the nominal level of the center\n     * channel during a regular downmix.\n     */\n    double center_mix_level;\n\n    /**\n     * Absolute scale factor representing the nominal level of the center\n     * channel during an Lt/Rt compatible downmix.\n     */\n    double center_mix_level_ltrt;\n\n    /**\n     * Absolute scale factor representing the nominal level of the surround\n     * channels during a regular downmix.\n     */\n    double surround_mix_level;\n\n    /**\n     * Absolute scale factor representing the nominal level of the surround\n     * channels during an Lt/Rt compatible downmix.\n     */\n    double surround_mix_level_ltrt;\n\n    /**\n     * Absolute scale factor representing the level at which the LFE data is\n     * mixed into L/R channels during downmixing.\n     */\n    double lfe_mix_level;\n} AVDownmixInfo;\n\n/**\n * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing.\n *\n * The side data is created and added to the frame if it's absent.\n *\n * @param frame the frame for which the side data is to be obtained.\n *\n * @return the AVDownmixInfo structure to be edited by the caller.\n */\nAVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame);\n\n/**\n * @}\n */\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_DOWNMIX_INFO_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/error.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * error code definitions\n */\n\n#ifndef AVUTIL_ERROR_H\n#define AVUTIL_ERROR_H\n\n#include <errno.h>\n#include <stddef.h>\n\n/**\n * @addtogroup lavu_error\n *\n * @{\n */\n\n\n/* error handling */\n#if EDOM > 0\n#define AVERROR(e) (-(e))   ///< Returns a negative error code from a POSIX error code, to return from library functions.\n#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.\n#else\n/* Some platforms have E* and errno already negated. */\n#define AVERROR(e) (e)\n#define AVUNERROR(e) (e)\n#endif\n\n#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))\n\n#define AVERROR_BSF_NOT_FOUND      FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found\n#define AVERROR_BUG                FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2\n#define AVERROR_BUFFER_TOO_SMALL   FFERRTAG( 'B','U','F','S') ///< Buffer too small\n#define AVERROR_DECODER_NOT_FOUND  FFERRTAG(0xF8,'D','E','C') ///< Decoder not found\n#define AVERROR_DEMUXER_NOT_FOUND  FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found\n#define AVERROR_ENCODER_NOT_FOUND  FFERRTAG(0xF8,'E','N','C') ///< Encoder not found\n#define AVERROR_EOF                FFERRTAG( 'E','O','F',' ') ///< End of file\n#define AVERROR_EXIT               FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted\n#define AVERROR_EXTERNAL           FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library\n#define AVERROR_FILTER_NOT_FOUND   FFERRTAG(0xF8,'F','I','L') ///< Filter not found\n#define AVERROR_INVALIDDATA        FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input\n#define AVERROR_MUXER_NOT_FOUND    FFERRTAG(0xF8,'M','U','X') ///< Muxer not found\n#define AVERROR_OPTION_NOT_FOUND   FFERRTAG(0xF8,'O','P','T') ///< Option not found\n#define AVERROR_PATCHWELCOME       FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome\n#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found\n\n#define AVERROR_STREAM_NOT_FOUND   FFERRTAG(0xF8,'S','T','R') ///< Stream not found\n/**\n * This is semantically identical to AVERROR_BUG\n * it has been introduced in Libav after our AVERROR_BUG and with a modified value.\n */\n#define AVERROR_BUG2               FFERRTAG( 'B','U','G',' ')\n#define AVERROR_UNKNOWN            FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library\n#define AVERROR_EXPERIMENTAL       (-0x2bb2afa8) ///< Requested feature is flagged experimental. 
Set strict_std_compliance if you really want to use it.\n\n#define AV_ERROR_MAX_STRING_SIZE 64\n\n/**\n * Put a description of the AVERROR code errnum in errbuf.\n * In case of failure the global variable errno is set to indicate the\n * error. Even in case of failure av_strerror() will print a generic\n * error message indicating the errnum provided to errbuf.\n *\n * @param errnum      error code to describe\n * @param errbuf      buffer to which description is written\n * @param errbuf_size the size in bytes of errbuf\n * @return 0 on success, a negative value if a description for errnum\n * cannot be found\n */\nint av_strerror(int errnum, char *errbuf, size_t errbuf_size);\n\n/**\n * Fill the provided buffer with a string containing an error string\n * corresponding to the AVERROR code errnum.\n *\n * @param errbuf         a buffer\n * @param errbuf_size    size in bytes of errbuf\n * @param errnum         error code to describe\n * @return the buffer in input, filled with the error description\n * @see av_strerror()\n */\nstatic inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum)\n{\n    av_strerror(errnum, errbuf, errbuf_size);\n    return errbuf;\n}\n\n/**\n * Convenience macro, the return value should be used only directly in\n * function arguments but never stand-alone.\n */\n#define av_err2str(errnum) \\\n    av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_ERROR_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/eval.h",
    "content": "/*\n * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * simple arithmetic expression evaluator\n */\n\n#ifndef AVUTIL_EVAL_H\n#define AVUTIL_EVAL_H\n\n#include \"avutil.h\"\n\ntypedef struct AVExpr AVExpr;\n\n/**\n * Parse and evaluate an expression.\n * Note, this is significantly slower than av_expr_eval().\n *\n * @param res a pointer to a double where is put the result value of\n * the expression, or NAN in case of error\n * @param s expression as a zero terminated string, for example \"1+2^3+5*5+sin(2/3)\"\n * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {\"PI\", \"E\", 0}\n * @param const_values a zero terminated array of values for the identifiers from const_names\n * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers\n * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument\n * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers\n * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments\n * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2\n * @param log_ctx parent logging context\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code otherwise\n */\nint av_expr_parse_and_eval(double *res, const char *s,\n                           const char * const *const_names, const double *const_values,\n                           const char * const *func1_names, double (* const *funcs1)(void *, double),\n                           const char * const *func2_names, double (* const *funcs2)(void *, double, double),\n                           void *opaque, int log_offset, void *log_ctx);\n\n/**\n * Parse an expression.\n *\n * @param expr a pointer where is put an AVExpr containing the parsed\n * value in case of successful parsing, or NULL otherwise.\n * The pointed to AVExpr must be freed with av_expr_free() by the user\n * when it is not needed anymore.\n * @param s expression as a zero terminated string, for example \"1+2^3+5*5+sin(2/3)\"\n * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {\"PI\", \"E\", 0}\n * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers\n * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument\n * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers\n * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments\n * @param log_ctx parent logging context\n * @return >= 0 in 
case of success, a negative value corresponding to an\n * AVERROR code otherwise\n */\nint av_expr_parse(AVExpr **expr, const char *s,\n                  const char * const *const_names,\n                  const char * const *func1_names, double (* const *funcs1)(void *, double),\n                  const char * const *func2_names, double (* const *funcs2)(void *, double, double),\n                  int log_offset, void *log_ctx);\n\n/**\n * Evaluate a previously parsed expression.\n *\n * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names\n * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2\n * @return the value of the expression\n */\ndouble av_expr_eval(AVExpr *e, const double *const_values, void *opaque);\n\n/**\n * Free a parsed expression previously created with av_expr_parse().\n */\nvoid av_expr_free(AVExpr *e);\n\n/**\n * Parse the string in numstr and return its value as a double. If\n * the string is empty, contains only whitespaces, or does not contain\n * an initial substring that has the expected syntax for a\n * floating-point number, no conversion is performed. In this case,\n * returns a value of zero and the value returned in tail is the value\n * of numstr.\n *\n * @param numstr a string representing a number, may contain one of\n * the International System number postfixes, for example 'K', 'M',\n * 'G'. If 'i' is appended after the postfix, powers of 2 are used\n * instead of powers of 10. The 'B' postfix multiplies the value for\n * 8, and can be appended after another postfix or used alone. This\n * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.\n * @param tail if non-NULL puts here the pointer to the char next\n * after the last parsed character\n */\ndouble av_strtod(const char *numstr, char **tail);\n\n#endif /* AVUTIL_EVAL_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/ffversion.h",
    "content": "#ifndef AVUTIL_FFVERSION_H\n#define AVUTIL_FFVERSION_H\n#define FFMPEG_VERSION \"2.2.3\"\n#endif /* AVUTIL_FFVERSION_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/fifo.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * a very simple circular buffer FIFO implementation\n */\n\n#ifndef AVUTIL_FIFO_H\n#define AVUTIL_FIFO_H\n\n#include <stdint.h>\n#include \"avutil.h\"\n#include \"attributes.h\"\n\ntypedef struct AVFifoBuffer {\n    uint8_t *buffer;\n    uint8_t *rptr, *wptr, *end;\n    uint32_t rndx, wndx;\n} AVFifoBuffer;\n\n/**\n * Initialize an AVFifoBuffer.\n * @param size of FIFO\n * @return AVFifoBuffer or NULL in case of memory allocation failure\n */\nAVFifoBuffer *av_fifo_alloc(unsigned int size);\n\n/**\n * Free an AVFifoBuffer.\n * @param f AVFifoBuffer to free\n */\nvoid av_fifo_free(AVFifoBuffer *f);\n\n/**\n * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.\n * @param f AVFifoBuffer to reset\n */\nvoid av_fifo_reset(AVFifoBuffer *f);\n\n/**\n * Return the amount of data in bytes in the AVFifoBuffer, that is the\n * amount of data you can read from it.\n * @param f AVFifoBuffer to read from\n * @return size\n */\nint av_fifo_size(AVFifoBuffer *f);\n\n/**\n * Return the amount of space in bytes in the AVFifoBuffer, that is the\n * amount of data you can write into it.\n * @param f AVFifoBuffer to write into\n * @return size\n */\nint av_fifo_space(AVFifoBuffer *f);\n\n/**\n * Feed data from an AVFifoBuffer to a user-supplied callback.\n * @param f AVFifoBuffer to read from\n * @param buf_size number of bytes to read\n * @param func generic read function\n * @param dest data destination\n */\nint av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));\n\n/**\n * Feed data from a user-supplied callback to an AVFifoBuffer.\n * @param f AVFifoBuffer to write to\n * @param src data source; non-const since it may be used as a\n * modifiable context by the function defined in func\n * @param size number of bytes to write\n * @param func generic write function; the first parameter is src,\n * the second is dest_buf, the third is dest_buf_size.\n * func must return the number of bytes written to dest_buf, or <= 0 to\n * indicate no more data available to write.\n * If func is NULL, src is interpreted as a simple byte array for source data.\n * @return the number of bytes written to the FIFO\n */\nint av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));\n\n/**\n * Resize an AVFifoBuffer.\n * In case of reallocation failure, the old FIFO is kept unchanged.\n *\n * @param f AVFifoBuffer to resize\n * @param size new AVFifoBuffer size in bytes\n * @return <0 for failure, >=0 otherwise\n */\nint av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);\n\n/**\n * Enlarge an AVFifoBuffer.\n * In case of reallocation failure, the old FIFO is kept unchanged.\n * The new fifo size may be 
larger than the requested size.\n *\n * @param f AVFifoBuffer to resize\n * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()\n * @return <0 for failure, >=0 otherwise\n */\nint av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);\n\n/**\n * Read and discard the specified amount of data from an AVFifoBuffer.\n * @param f AVFifoBuffer to read from\n * @param size amount of data to read in bytes\n */\nvoid av_fifo_drain(AVFifoBuffer *f, int size);\n\n/**\n * Return a pointer to the data stored in a FIFO buffer at a certain offset.\n * The FIFO buffer is not modified.\n *\n * @param f    AVFifoBuffer to peek at, f must be non-NULL\n * @param offs an offset in bytes, its absolute value must be less\n *             than the used buffer size or the returned pointer will\n *             point outside to the buffer data.\n *             The used buffer size can be checked with av_fifo_size().\n */\nstatic inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)\n{\n    uint8_t *ptr = f->rptr + offs;\n    if (ptr >= f->end)\n        ptr = f->buffer + (ptr - f->end);\n    else if (ptr < f->buffer)\n        ptr = f->end - (f->buffer - ptr);\n    return ptr;\n}\n\n#endif /* AVUTIL_FIFO_H */\n"
  },
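  {
    "path": "docs/examples/fifo_example.c",
    "content": "/*\n * Illustrative example added for documentation purposes; NOT part of FFmpeg\n * or of the original project sources. A minimal sketch of a round trip\n * through the AVFifoBuffer API declared in libavutil/fifo.h: with a NULL\n * callback, av_fifo_generic_write()/av_fifo_generic_read() treat src/dest\n * as plain byte arrays. The file name and sizes are assumptions.\n */\n#include <stdio.h>\n#include <libavutil/fifo.h>\n\nint main(void)\n{\n    AVFifoBuffer *fifo = av_fifo_alloc(16);   /* 16-byte FIFO */\n    uint8_t out[6] = {0};\n\n    if (!fifo)\n        return 1;\n\n    /* func == NULL: src is interpreted as a simple byte array. */\n    av_fifo_generic_write(fifo, (void *)\"hello\", 6, NULL);\n    printf(\"buffered: %d bytes, free: %d bytes\\n\",\n           av_fifo_size(fifo), av_fifo_space(fifo));\n\n    /* Same convention on the read side. */\n    av_fifo_generic_read(fifo, out, 6, NULL);\n    printf(\"read back: %s\\n\", out);\n\n    av_fifo_free(fifo);\n    return 0;\n}\n"
  },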
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/file.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_FILE_H\n#define AVUTIL_FILE_H\n\n#include <stdint.h>\n\n#include \"avutil.h\"\n\n/**\n * @file\n * Misc file utilities.\n */\n\n/**\n * Read the file with name filename, and put its content in a newly\n * allocated buffer or map it with mmap() when available.\n * In case of success set *bufptr to the read or mmapped buffer, and\n * *size to the size in bytes of the buffer in *bufptr.\n * The returned buffer must be released with av_file_unmap().\n *\n * @param log_offset loglevel offset used for logging\n * @param log_ctx context used for logging\n * @return a non negative number in case of success, a negative value\n * corresponding to an AVERROR error code in case of failure\n */\nint av_file_map(const char *filename, uint8_t **bufptr, size_t *size,\n                int log_offset, void *log_ctx);\n\n/**\n * Unmap or free the buffer bufptr created by av_file_map().\n *\n * @param size size in bytes of bufptr, must be the same as returned\n * by av_file_map()\n */\nvoid av_file_unmap(uint8_t *bufptr, size_t size);\n\n/**\n * Wrapper to work around the lack of mkstemp() on mingw.\n * Also, tries to create file in /tmp first, if possible.\n * *prefix can be a character constant; *filename will be allocated internally.\n * @return file descriptor of opened file (or -1 on error)\n * and opened file name in **filename.\n * @note On very old libcs it is necessary to set a secure umask before\n *       calling this, av_tempfile() can't call umask itself as it is used in\n *       libraries and could interfere with the calling application.\n */\nint av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx);\n\n#endif /* AVUTIL_FILE_H */\n"
  },
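  {
    "path": "docs/examples/file_map_example.c",
    "content": "/*\n * Illustrative example added for documentation purposes; NOT part of FFmpeg\n * or of the original project sources. A minimal sketch that loads a whole\n * file into memory with av_file_map() and releases it with av_file_unmap(),\n * as documented in libavutil/file.h. The default file name is a made-up\n * placeholder.\n */\n#include <stdio.h>\n#include <libavutil/file.h>\n\nint main(int argc, char **argv)\n{\n    const char *name = argc > 1 ? argv[1] : \"input.bin\"; /* hypothetical */\n    uint8_t *buf = NULL;\n    size_t size = 0;\n\n    /* On success, buf/size describe the read or mmapped contents. */\n    if (av_file_map(name, &buf, &size, 0, NULL) < 0) {\n        fprintf(stderr, \"could not map %s\\n\", name);\n        return 1;\n    }\n    printf(\"%s: %u bytes\\n\", name, (unsigned)size);\n\n    /* Must be released with the matching call, passing the same size. */\n    av_file_unmap(buf, size);\n    return 0;\n}\n"
  },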
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/frame.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * @ingroup lavu_frame\n * reference-counted frame API\n */\n\n#ifndef AVUTIL_FRAME_H\n#define AVUTIL_FRAME_H\n\n#include <stdint.h>\n\n#include \"avutil.h\"\n#include \"buffer.h\"\n#include \"dict.h\"\n#include \"rational.h\"\n#include \"samplefmt.h\"\n#include \"version.h\"\n\n\nenum AVColorSpace{\n    AVCOL_SPC_RGB         =  0,\n    AVCOL_SPC_BT709       =  1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B\n    AVCOL_SPC_UNSPECIFIED =  2,\n    AVCOL_SPC_FCC         =  4,\n    AVCOL_SPC_BT470BG     =  5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601\n    AVCOL_SPC_SMPTE170M   =  6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above\n    AVCOL_SPC_SMPTE240M   =  7,\n    AVCOL_SPC_YCOCG       =  8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16\n    AVCOL_SPC_BT2020_NCL  =  9, ///< ITU-R BT2020 non-constant luminance system\n    AVCOL_SPC_BT2020_CL   = 10, ///< ITU-R BT2020 constant luminance system\n    AVCOL_SPC_NB              , ///< Not part of ABI\n};\n#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG\n\nenum AVColorRange{\n    AVCOL_RANGE_UNSPECIFIED = 0,\n    AVCOL_RANGE_MPEG        = 1, ///< the normal 219*2^(n-8) \"MPEG\" YUV ranges\n    AVCOL_RANGE_JPEG        = 2, ///< the normal     2^n-1   \"JPEG\" YUV ranges\n    AVCOL_RANGE_NB             , ///< Not part of ABI\n};\n\n\n/**\n * @defgroup lavu_frame AVFrame\n * @ingroup lavu_data\n *\n * @{\n * AVFrame is an abstraction for reference-counted raw multimedia data.\n */\n\nenum AVFrameSideDataType {\n    /**\n     * The data is the AVPanScan struct defined in libavcodec.\n     */\n    AV_FRAME_DATA_PANSCAN,\n    /**\n     * ATSC A53 Part 4 Closed Captions.\n     * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.\n     * The number of bytes of CC data is AVFrameSideData.size.\n     */\n    AV_FRAME_DATA_A53_CC,\n    /**\n     * Stereoscopic 3d metadata.\n     * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.\n     */\n    AV_FRAME_DATA_STEREO3D,\n    /**\n     * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.\n     */\n    AV_FRAME_DATA_MATRIXENCODING,\n    /**\n     * Metadata relevant to a downmix procedure.\n     * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.\n     */\n    AV_FRAME_DATA_DOWNMIX_INFO,\n};\n\ntypedef struct AVFrameSideData {\n    enum AVFrameSideDataType type;\n    uint8_t *data;\n    int      size;\n    AVDictionary *metadata;\n} AVFrameSideData;\n\n/**\n * This structure describes decoded (raw) audio or video data.\n *\n * AVFrame must be allocated using av_frame_alloc(). 
Note that this only\n * allocates the AVFrame itself, the buffers for the data must be managed\n * through other means (see below).\n * AVFrame must be freed with av_frame_free().\n *\n * AVFrame is typically allocated once and then reused multiple times to hold\n * different data (e.g. a single AVFrame to hold frames received from a\n * decoder). In such a case, av_frame_unref() will free any references held by\n * the frame and reset it to its original clean state before it\n * is reused again.\n *\n * The data described by an AVFrame is usually reference counted through the\n * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /\n * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at\n * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,\n * every single data plane must be contained in one of the buffers in\n * AVFrame.buf or AVFrame.extended_buf.\n * There may be a single buffer for all the data, or one separate buffer for\n * each plane, or anything in between.\n *\n * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added\n * to the end with a minor bump.\n * Similarly fields that are marked as to be only accessed by\n * av_opt_ptr() can be reordered. This allows 2 forks to add fields\n * without breaking compatibility with each other.\n */\ntypedef struct AVFrame {\n#define AV_NUM_DATA_POINTERS 8\n    /**\n     * pointer to the picture/channel planes.\n     * This might be different from the first allocated byte\n     *\n     * Some decoders access areas outside 0,0 - width,height, please\n     * see avcodec_align_dimensions2(). Some filters and swscale can read\n     * up to 16 bytes beyond the planes, if these filters are to be used,\n     * then 16 extra bytes must be allocated.\n     */\n    uint8_t *data[AV_NUM_DATA_POINTERS];\n\n    /**\n     * For video, size in bytes of each picture line.\n     * For audio, size in bytes of each plane.\n     *\n     * For audio, only linesize[0] may be set. 
For planar audio, each channel\n     * plane must be the same size.\n     *\n     * For video the linesizes should be multiples of the CPU's alignment\n     * preference; this is 16 or 32 for modern desktop CPUs.\n     * Some code requires such alignment, other code can be slower without\n     * correct alignment, and for yet other code it makes no difference.\n     *\n     * @note The linesize may be larger than the size of usable data -- there\n     * may be extra padding present for performance reasons.\n     */\n    int linesize[AV_NUM_DATA_POINTERS];\n\n    /**\n     * pointers to the data planes/channels.\n     *\n     * For video, this should simply point to data[].\n     *\n     * For planar audio, each channel has a separate data pointer, and\n     * linesize[0] contains the size of each channel buffer.\n     * For packed audio, there is just one data pointer, and linesize[0]\n     * contains the total size of the buffer for all channels.\n     *\n     * Note: Both data and extended_data should always be set in a valid frame,\n     * but for planar audio with more channels than can fit in data,\n     * extended_data must be used in order to access all channels.\n     */\n    uint8_t **extended_data;\n\n    /**\n     * width and height of the video frame\n     */\n    int width, height;\n\n    /**\n     * number of audio samples (per channel) described by this frame\n     */\n    int nb_samples;\n\n    /**\n     * format of the frame, -1 if unknown or unset\n     * Values correspond to enum AVPixelFormat for video frames,\n     * enum AVSampleFormat for audio.\n     */\n    int format;\n\n    /**\n     * 1 -> keyframe, 0 -> not\n     */\n    int key_frame;\n\n    /**\n     * Picture type of the frame.\n     */\n    enum AVPictureType pict_type;\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    uint8_t *base[AV_NUM_DATA_POINTERS];\n#endif\n\n    /**\n     * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.\n     */\n    AVRational sample_aspect_ratio;\n\n    /**\n     * Presentation timestamp in time_base units (time when frame should be shown to user).\n     */\n    int64_t pts;\n\n    /**\n     * PTS copied from the AVPacket that was decoded to produce this frame.\n     */\n    int64_t pkt_pts;\n\n    /**\n     * DTS copied from the AVPacket that triggered returning this frame. 
(if frame threading isn't used)\n     * This is also the presentation time of this AVFrame calculated from\n     * only AVPacket.dts values without pts values.\n     */\n    int64_t pkt_dts;\n\n    /**\n     * picture number in bitstream order\n     */\n    int coded_picture_number;\n    /**\n     * picture number in display order\n     */\n    int display_picture_number;\n\n    /**\n     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))\n     */\n    int quality;\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    int reference;\n\n    /**\n     * QP table\n     */\n    attribute_deprecated\n    int8_t *qscale_table;\n    /**\n     * QP store stride\n     */\n    attribute_deprecated\n    int qstride;\n\n    attribute_deprecated\n    int qscale_type;\n\n    /**\n     * mbskip_table[mb]>=1 if MB didn't change\n     * stride= mb_width = (width+15)>>4\n     */\n    attribute_deprecated\n    uint8_t *mbskip_table;\n\n    /**\n     * motion vector table\n     * @code\n     * example:\n     * int mv_sample_log2= 4 - motion_subsample_log2;\n     * int mb_width= (width+15)>>4;\n     * int mv_stride= (mb_width << mv_sample_log2) + 1;\n     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];\n     * @endcode\n     */\n    attribute_deprecated\n    int16_t (*motion_val[2])[2];\n\n    /**\n     * macroblock type table\n     * mb_type_base + mb_width + 2\n     */\n    attribute_deprecated\n    uint32_t *mb_type;\n\n    /**\n     * DCT coefficients\n     */\n    attribute_deprecated\n    short *dct_coeff;\n\n    /**\n     * motion reference frame index\n     * the order in which these are stored can depend on the codec.\n     */\n    attribute_deprecated\n    int8_t *ref_index[2];\n#endif\n\n    /**\n     * for some private data of the user\n     */\n    void *opaque;\n\n    /**\n     * error\n     */\n    uint64_t error[AV_NUM_DATA_POINTERS];\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    int type;\n#endif\n\n    /**\n     * When decoding, this signals how much the picture must be delayed.\n     * extra_delay = repeat_pict / (2*fps)\n     */\n    int repeat_pict;\n\n    /**\n     * The content of the picture is interlaced.\n     */\n    int interlaced_frame;\n\n    /**\n     * If the content is interlaced, is top field displayed first.\n     */\n    int top_field_first;\n\n    /**\n     * Tell user application that palette has changed from previous frame.\n     */\n    int palette_has_changed;\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    int buffer_hints;\n\n    /**\n     * Pan scan.\n     */\n    attribute_deprecated\n    struct AVPanScan *pan_scan;\n#endif\n\n    /**\n     * reordered opaque 64bit (generally an integer or a double precision float\n     * PTS but can be anything).\n     * The user sets AVCodecContext.reordered_opaque to represent the input at\n     * that time,\n     * the decoder reorders values as needed and sets AVFrame.reordered_opaque\n     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque\n     * @deprecated in favor of pkt_pts\n     */\n    int64_t reordered_opaque;\n\n#if FF_API_AVFRAME_LAVC\n    /**\n     * @deprecated this field is unused\n     */\n    attribute_deprecated void *hwaccel_picture_private;\n\n    attribute_deprecated\n    struct AVCodecContext *owner;\n    attribute_deprecated\n    void *thread_opaque;\n\n    /**\n     * log2 of the size of the block which a single vector in motion_val represents:\n     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)\n     */\n    attribute_deprecated\n   
 uint8_t motion_subsample_log2;\n#endif\n\n    /**\n     * Sample rate of the audio data.\n     */\n    int sample_rate;\n\n    /**\n     * Channel layout of the audio data.\n     */\n    uint64_t channel_layout;\n\n    /**\n     * AVBuffer references backing the data for this frame. If all elements of\n     * this array are NULL, then this frame is not reference counted.\n     *\n     * There may be at most one AVBuffer per data plane, so for video this array\n     * always contains all the references. For planar audio with more than\n     * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in\n     * this array. Then the extra AVBufferRef pointers are stored in the\n     * extended_buf array.\n     */\n    AVBufferRef *buf[AV_NUM_DATA_POINTERS];\n\n    /**\n     * For planar audio which requires more than AV_NUM_DATA_POINTERS\n     * AVBufferRef pointers, this array will hold all the references which\n     * cannot fit into AVFrame.buf.\n     *\n     * Note that this is different from AVFrame.extended_data, which always\n     * contains all the pointers. This array only contains the extra pointers,\n     * which cannot fit into AVFrame.buf.\n     *\n     * This array is always allocated using av_malloc() by whoever constructs\n     * the frame. It is freed in av_frame_unref().\n     */\n    AVBufferRef **extended_buf;\n    /**\n     * Number of elements in extended_buf.\n     */\n    int        nb_extended_buf;\n\n    AVFrameSideData **side_data;\n    int            nb_side_data;\n\n/**\n * @defgroup lavu_frame_flags AV_FRAME_FLAGS\n * Flags describing additional frame properties.\n *\n * @{\n */\n\n/**\n * The frame data may be corrupted, e.g. due to decoding errors.\n */\n#define AV_FRAME_FLAG_CORRUPT       (1 << 0)\n/**\n * @}\n */\n\n    /**\n     * Frame flags, a combination of @ref lavu_frame_flags\n     */\n    int flags;\n\n    /**\n     * frame timestamp estimated using various heuristics, in stream time base\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_best_effort_timestamp(frame)\n     * - encoding: unused\n     * - decoding: set by libavcodec, read by user.\n     */\n    int64_t best_effort_timestamp;\n\n    /**\n     * reordered pos from the last AVPacket that has been input into the decoder\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_pkt_pos(frame)\n     * - encoding: unused\n     * - decoding: Read by user.\n     */\n    int64_t pkt_pos;\n\n    /**\n     * duration of the corresponding packet, expressed in\n     * AVStream->time_base units, 0 if unknown.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_pkt_duration(frame)\n     * - encoding: unused\n     * - decoding: Read by user.\n     */\n    int64_t pkt_duration;\n\n    /**\n     * metadata.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_metadata(frame)\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    AVDictionary *metadata;\n\n    /**\n     * decode error flags of the frame, set to a combination of\n     * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there\n     * were errors during the decoding.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_decode_error_flags(frame)\n     * - encoding: unused\n     * - decoding: set by libavcodec, read by user.\n     */\n    int decode_error_flags;\n#define FF_DECODE_ERROR_INVALID_BITSTREAM   1\n#define 
FF_DECODE_ERROR_MISSING_REFERENCE   2\n\n    /**\n     * number of audio channels, only used for audio.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_channels(frame)\n     * - encoding: unused\n     * - decoding: Read by user.\n     */\n    int channels;\n\n    /**\n     * size of the corresponding packet containing the compressed\n     * frame. It must be accessed using av_frame_get_pkt_size() and\n     * av_frame_set_pkt_size().\n     * It is set to a negative value if unknown.\n     * - encoding: unused\n     * - decoding: set by libavcodec, read by user.\n     */\n    int pkt_size;\n\n    /**\n     * YUV colorspace type.\n     * It must be accessed using av_frame_get_colorspace() and\n     * av_frame_set_colorspace().\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorSpace colorspace;\n\n    /**\n     * MPEG vs JPEG YUV range.\n     * It must be accessed using av_frame_get_color_range() and\n     * av_frame_set_color_range().\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorRange color_range;\n\n\n    /**\n     * Not to be accessed directly from outside libavutil\n     */\n    AVBufferRef *qp_table_buf;\n} AVFrame;\n\n/**\n * Accessors for some AVFrame fields.\n * The position of these fields in the structure is not part of the ABI;\n * they should not be accessed directly outside libavcodec.\n */\nint64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);\nvoid    av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);\nint64_t av_frame_get_pkt_duration         (const AVFrame *frame);\nvoid    av_frame_set_pkt_duration         (AVFrame *frame, int64_t val);\nint64_t av_frame_get_pkt_pos              (const AVFrame *frame);\nvoid    av_frame_set_pkt_pos              (AVFrame *frame, int64_t val);\nint64_t av_frame_get_channel_layout       (const AVFrame *frame);\nvoid    av_frame_set_channel_layout       (AVFrame *frame, int64_t val);\nint     av_frame_get_channels             (const AVFrame *frame);\nvoid    av_frame_set_channels             (AVFrame *frame, int     val);\nint     av_frame_get_sample_rate          (const AVFrame *frame);\nvoid    av_frame_set_sample_rate          (AVFrame *frame, int     val);\nAVDictionary *av_frame_get_metadata       (const AVFrame *frame);\nvoid          av_frame_set_metadata       (AVFrame *frame, AVDictionary *val);\nint     av_frame_get_decode_error_flags   (const AVFrame *frame);\nvoid    av_frame_set_decode_error_flags   (AVFrame *frame, int     val);\nint     av_frame_get_pkt_size(const AVFrame *frame);\nvoid    av_frame_set_pkt_size(AVFrame *frame, int val);\nAVDictionary **avpriv_frame_get_metadatap(AVFrame *frame);\nint8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);\nint av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);\nenum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);\nvoid    av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);\nenum AVColorRange av_frame_get_color_range(const AVFrame *frame);\nvoid    av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);\n\n/**\n * Get the name of a colorspace.\n * @return a static string identifying the colorspace; can be NULL.\n */\nconst char *av_get_colorspace_name(enum AVColorSpace val);\n\n/**\n * Allocate an AVFrame and set its fields to default values.  
The resulting\n * struct must be freed using av_frame_free().\n *\n * @return An AVFrame filled with default values or NULL on failure.\n *\n * @note this only allocates the AVFrame itself, not the data buffers. Those\n * must be allocated through other means, e.g. with av_frame_get_buffer() or\n * manually.\n */\nAVFrame *av_frame_alloc(void);\n\n/**\n * Free the frame and any dynamically allocated objects in it,\n * e.g. extended_data. If the frame is reference counted, it will be\n * unreferenced first.\n *\n * @param frame frame to be freed. The pointer will be set to NULL.\n */\nvoid av_frame_free(AVFrame **frame);\n\n/**\n * Set up a new reference to the data described by the source frame.\n *\n * Copy frame properties from src to dst and create a new reference for each\n * AVBufferRef from src.\n *\n * If src is not reference counted, new buffers are allocated and the data is\n * copied.\n *\n * @return 0 on success, a negative AVERROR on error\n */\nint av_frame_ref(AVFrame *dst, const AVFrame *src);\n\n/**\n * Create a new frame that references the same data as src.\n *\n * This is a shortcut for av_frame_alloc()+av_frame_ref().\n *\n * @return newly created AVFrame on success, NULL on error.\n */\nAVFrame *av_frame_clone(const AVFrame *src);\n\n/**\n * Unreference all the buffers referenced by frame and reset the frame fields.\n */\nvoid av_frame_unref(AVFrame *frame);\n\n/**\n * Move everything contained in src to dst and reset src.\n */\nvoid av_frame_move_ref(AVFrame *dst, AVFrame *src);\n\n/**\n * Allocate new buffer(s) for audio or video data.\n *\n * The following fields must be set on frame before calling this function:\n * - format (pixel format for video, sample format for audio)\n * - width and height for video\n * - nb_samples and channel_layout for audio\n *\n * This function will fill AVFrame.data and AVFrame.buf arrays and, if\n * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.\n * For planar formats, one buffer will be allocated for each plane.\n *\n * @param frame frame in which to store the new buffers.\n * @param align required buffer size alignment\n *\n * @return 0 on success, a negative AVERROR on error.\n */\nint av_frame_get_buffer(AVFrame *frame, int align);\n\n/**\n * Check if the frame data is writable.\n *\n * @return A positive value if the frame data is writable (which is true if and\n * only if each of the underlying buffers has only one reference, namely the one\n * stored in this frame). Return 0 otherwise.\n *\n * If 1 is returned the answer is valid until av_buffer_ref() is called on any\n * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).\n *\n * @see av_frame_make_writable(), av_buffer_is_writable()\n */\nint av_frame_is_writable(AVFrame *frame);\n\n/**\n * Ensure that the frame data is writable, avoiding data copy if possible.\n *\n * Do nothing if the frame is writable, allocate new buffers and copy the data\n * if it is not.\n *\n * @return 0 on success, a negative AVERROR on error.\n *\n * @see av_frame_is_writable(), av_buffer_is_writable(),\n * av_buffer_make_writable()\n */\nint av_frame_make_writable(AVFrame *frame);\n\n/**\n * Copy the frame data from src to dst.\n *\n * This function does not allocate anything, dst must be already initialized and\n * allocated with the same parameters as src.\n *\n * This function only copies the frame data (i.e. 
the contents of the data /\n * extended data arrays), not any other properties.\n *\n * @return >= 0 on success, a negative AVERROR on error.\n */\nint av_frame_copy(AVFrame *dst, const AVFrame *src);\n\n/**\n * Copy only \"metadata\" fields from src to dst.\n *\n * Metadata for the purpose of this function are those fields that do not affect\n * the data layout in the buffers.  E.g. pts, sample rate (for audio) or sample\n * aspect ratio (for video), but not width/height or channel layout.\n * Side data is also copied.\n */\nint av_frame_copy_props(AVFrame *dst, const AVFrame *src);\n\n/**\n * Get the buffer reference a given data plane is stored in.\n *\n * @param plane index of the data plane of interest in frame->extended_data.\n *\n * @return the buffer reference that contains the plane or NULL if the input\n * frame is not valid.\n */\nAVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);\n\n/**\n * Add a new side data to a frame.\n *\n * @param frame a frame to which the side data should be added\n * @param type type of the added side data\n * @param size size of the side data\n *\n * @return newly added side data on success, NULL on error\n */\nAVFrameSideData *av_frame_new_side_data(AVFrame *frame,\n                                        enum AVFrameSideDataType type,\n                                        int size);\n\n/**\n * @return a pointer to the side data of a given type on success, NULL if there\n * is no side data with such type in this frame.\n */\nAVFrameSideData *av_frame_get_side_data(const AVFrame *frame,\n                                        enum AVFrameSideDataType type);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_FRAME_H */\n"
  },
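  {
    "path": "docs/examples/frame_alloc_example.c",
    "content": "/*\n * Illustrative example added for documentation purposes; NOT part of FFmpeg\n * or of the original project sources. A minimal sketch of the two-step\n * allocation pattern documented in libavutil/frame.h: av_frame_alloc()\n * creates only the struct, av_frame_get_buffer() allocates the\n * reference-counted data planes. The 640x480 YUV420P parameters and the\n * 32-byte alignment are assumptions.\n */\n#include <stdio.h>\n#include <libavutil/frame.h>\n\nint main(void)\n{\n    AVFrame *frame = av_frame_alloc();\n    if (!frame)\n        return 1;\n\n    /* These fields must be set before av_frame_get_buffer(). */\n    frame->format = AV_PIX_FMT_YUV420P;\n    frame->width  = 640;\n    frame->height = 480;\n\n    if (av_frame_get_buffer(frame, 32) < 0) {\n        av_frame_free(&frame);\n        return 1;\n    }\n\n    /* buf[0] != NULL means the frame is reference counted. */\n    printf(\"luma linesize: %d, refcounted: %s\\n\",\n           frame->linesize[0], frame->buf[0] ? \"yes\" : \"no\");\n\n    av_frame_free(&frame); /* unreferences buffers, then frees the struct */\n    return 0;\n}\n"
  },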
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/hmac.h",
    "content": "/*\n * Copyright (C) 2012 Martin Storsjo\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_HMAC_H\n#define AVUTIL_HMAC_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_hmac HMAC\n * @ingroup lavu_crypto\n * @{\n */\n\nenum AVHMACType {\n    AV_HMAC_MD5,\n    AV_HMAC_SHA1,\n    AV_HMAC_SHA224 = 10,\n    AV_HMAC_SHA256,\n    AV_HMAC_SHA384,\n    AV_HMAC_SHA512,\n};\n\ntypedef struct AVHMAC AVHMAC;\n\n/**\n * Allocate an AVHMAC context.\n * @param type The hash function used for the HMAC.\n */\nAVHMAC *av_hmac_alloc(enum AVHMACType type);\n\n/**\n * Free an AVHMAC context.\n * @param ctx The context to free, may be NULL\n */\nvoid av_hmac_free(AVHMAC *ctx);\n\n/**\n * Initialize an AVHMAC context with an authentication key.\n * @param ctx    The HMAC context\n * @param key    The authentication key\n * @param keylen The length of the key, in bytes\n */\nvoid av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen);\n\n/**\n * Hash data with the HMAC.\n * @param ctx  The HMAC context\n * @param data The data to hash\n * @param len  The length of the data, in bytes\n */\nvoid av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len);\n\n/**\n * Finish hashing and output the HMAC digest.\n * @param ctx    The HMAC context\n * @param out    The output buffer to write the digest into\n * @param outlen The length of the out buffer, in bytes\n * @return       The number of bytes written to out, or a negative error code.\n */\nint av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen);\n\n/**\n * Hash an array of data with a key.\n * @param ctx    The HMAC context\n * @param data   The data to hash\n * @param len    The length of the data, in bytes\n * @param key    The authentication key\n * @param keylen The length of the key, in bytes\n * @param out    The output buffer to write the digest into\n * @param outlen The length of the out buffer, in bytes\n * @return       The number of bytes written to out, or a negative error code.\n */\nint av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len,\n                 const uint8_t *key, unsigned int keylen,\n                 uint8_t *out, unsigned int outlen);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_HMAC_H */\n"
  },
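  {
    "path": "docs/examples/hmac_example.c",
    "content": "/*\n * Illustrative example added for documentation purposes; NOT part of FFmpeg\n * or of the original project sources. A minimal sketch computing an\n * HMAC-SHA256 digest in one call with av_hmac_calc(), per\n * libavutil/hmac.h. Key and message are made-up values; SHA-256 digests\n * are 32 bytes long.\n */\n#include <stdio.h>\n#include <string.h>\n#include <libavutil/hmac.h>\n\nint main(void)\n{\n    static const uint8_t key[] = \"secret-key\";   /* hypothetical key */\n    static const uint8_t msg[] = \"hello, world\"; /* hypothetical message */\n    uint8_t digest[32];\n    AVHMAC *h = av_hmac_alloc(AV_HMAC_SHA256);\n    int i, n;\n\n    if (!h)\n        return 1;\n\n    n = av_hmac_calc(h, msg, strlen((const char *)msg),\n                     key, strlen((const char *)key),\n                     digest, sizeof(digest));\n    for (i = 0; i < n; i++)\n        printf(\"%02x\", digest[i]);\n    printf(\"\\n\");\n\n    av_hmac_free(h);\n    return n < 0;\n}\n"
  },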
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/imgutils.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_IMGUTILS_H\n#define AVUTIL_IMGUTILS_H\n\n/**\n * @file\n * misc image utilities\n *\n * @addtogroup lavu_picture\n * @{\n */\n\n#include \"avutil.h\"\n#include \"pixdesc.h\"\n\n/**\n * Compute the max pixel step for each plane of an image with a\n * format described by pixdesc.\n *\n * The pixel step is the distance in bytes between the first byte of\n * the group of bytes which describe a pixel component and the first\n * byte of the successive group in the same plane for the same\n * component.\n *\n * @param max_pixsteps an array which is filled with the max pixel step\n * for each plane. Since a plane may contain different pixel\n * components, the computed max_pixsteps[plane] is relative to the\n * component in the plane with the max pixel step.\n * @param max_pixstep_comps an array which is filled with the component\n * for each plane which has the max pixel step. May be NULL.\n */\nvoid av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],\n                                const AVPixFmtDescriptor *pixdesc);\n\n/**\n * Compute the size of an image line with format pix_fmt and width\n * width for the plane plane.\n *\n * @return the computed size in bytes\n */\nint av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);\n\n/**\n * Fill plane linesizes for an image with pixel format pix_fmt and\n * width width.\n *\n * @param linesizes array to be filled with the linesize for each plane\n * @return >= 0 in case of success, a negative error code otherwise\n */\nint av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);\n\n/**\n * Fill plane data pointers for an image with pixel format pix_fmt and\n * height height.\n *\n * @param data pointers array to be filled with the pointer for each image plane\n * @param ptr the pointer to a buffer which will contain the image\n * @param linesizes the array containing the linesize for each\n * plane, should be filled by av_image_fill_linesizes()\n * @return the size in bytes required for the image buffer, a negative\n * error code in case of failure\n */\nint av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,\n                           uint8_t *ptr, const int linesizes[4]);\n\n/**\n * Allocate an image with size w and h and pixel format pix_fmt, and\n * fill pointers and linesizes accordingly.\n * The allocated image buffer has to be freed by using\n * av_freep(&pointers[0]).\n *\n * @param align the value to use for buffer size alignment\n * @return the size in bytes required for the image buffer, a negative\n * error code in case of failure\n */\nint av_image_alloc(uint8_t *pointers[4], int linesizes[4],\n                   int w, int h, enum 
AVPixelFormat pix_fmt, int align);\n\n/**\n * Copy image plane from src to dst.\n * That is, copy \"height\" number of lines of \"bytewidth\" bytes each.\n * The first byte of each successive line is separated by *_linesize\n * bytes.\n *\n * bytewidth must be contained by both absolute values of dst_linesize\n * and src_linesize, otherwise the function behavior is undefined.\n *\n * @param dst_linesize linesize for the image plane in dst\n * @param src_linesize linesize for the image plane in src\n */\nvoid av_image_copy_plane(uint8_t       *dst, int dst_linesize,\n                         const uint8_t *src, int src_linesize,\n                         int bytewidth, int height);\n\n/**\n * Copy image in src_data to dst_data.\n *\n * @param dst_linesizes linesizes for the image in dst_data\n * @param src_linesizes linesizes for the image in src_data\n */\nvoid av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],\n                   const uint8_t *src_data[4], const int src_linesizes[4],\n                   enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Setup the data pointers and linesizes based on the specified image\n * parameters and the provided array.\n *\n * The fields of the given image are filled in by using the src\n * address which points to the image data buffer. Depending on the\n * specified pixel format, one or multiple image data pointers and\n * line sizes will be set.  If a planar format is specified, several\n * pointers will be set pointing to the different picture planes and\n * the line sizes of the different planes will be stored in the\n * lines_sizes array. Call with src == NULL to get the required\n * size for the src buffer.\n *\n * To allocate the buffer and fill in the dst_data and dst_linesize in\n * one call, use av_image_alloc().\n *\n * @param dst_data      data pointers to be filled in\n * @param dst_linesizes linesizes for the image in dst_data to be filled in\n * @param src           buffer which will contain or contains the actual image data, can be NULL\n * @param pix_fmt       the pixel format of the image\n * @param width         the width of the image in pixels\n * @param height        the height of the image in pixels\n * @param align         the value used in src for linesize alignment\n * @return the size in bytes required for src, a negative error code\n * in case of failure\n */\nint av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],\n                         const uint8_t *src,\n                         enum AVPixelFormat pix_fmt, int width, int height, int align);\n\n/**\n * Return the size in bytes of the amount of data required to store an\n * image with the given parameters.\n *\n * @param[in] align the assumed linesize alignment\n */\nint av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align);\n\n/**\n * Copy image data from an image into a buffer.\n *\n * av_image_get_buffer_size() can be used to compute the required size\n * for the buffer to fill.\n *\n * @param dst           a buffer into which picture data will be copied\n * @param dst_size      the size in bytes of dst\n * @param src_data      pointers containing the source image data\n * @param src_linesizes linesizes for the image in src_data\n * @param pix_fmt       the pixel format of the source image\n * @param width         the width of the source image in pixels\n * @param height        the height of the source image in pixels\n * @param align         the assumed linesize alignment for dst\n * @return the 
number of bytes written to dst, or a negative value\n * (error code) on error\n */\nint av_image_copy_to_buffer(uint8_t *dst, int dst_size,\n                            const uint8_t * const src_data[4], const int src_linesize[4],\n                            enum AVPixelFormat pix_fmt, int width, int height, int align);\n\n/**\n * Check if the given dimension of an image is valid, meaning that all\n * bytes of the image can be addressed with a signed int.\n *\n * @param w the width of the picture\n * @param h the height of the picture\n * @param log_offset the offset to sum to the log level for logging with log_ctx\n * @param log_ctx the parent logging context, it may be NULL\n * @return >= 0 if valid, a negative error code otherwise\n */\nint av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);\n\nint avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt);\n\n/**\n * @}\n */\n\n\n#endif /* AVUTIL_IMGUTILS_H */\n"
  },
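  {
    "path": "docs/examples/imgutils_alloc_example.c",
    "content": "/*\n * Illustrative example added for documentation purposes; NOT part of FFmpeg\n * or of the original project sources. A minimal sketch that allocates one\n * image with av_image_alloc() and frees it the way libavutil/imgutils.h\n * documents, via av_freep(&pointers[0]). Dimensions, pixel format and the\n * 16-byte alignment are assumptions.\n */\n#include <stdio.h>\n#include <libavutil/imgutils.h>\n#include <libavutil/mem.h>\n\nint main(void)\n{\n    uint8_t *data[4];\n    int linesize[4];\n\n    /* Returns the total buffer size in bytes, or a negative error code. */\n    int size = av_image_alloc(data, linesize, 320, 240, AV_PIX_FMT_YUV420P, 16);\n    if (size < 0)\n        return 1;\n\n    printf(\"buffer: %d bytes, Y/U/V linesizes: %d/%d/%d\\n\",\n           size, linesize[0], linesize[1], linesize[2]);\n\n    av_freep(&data[0]); /* one underlying buffer backs all planes */\n    return 0;\n}\n"
  },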
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/intfloat.h",
    "content": "/*\n * Copyright (c) 2011 Mans Rullgard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_INTFLOAT_H\n#define AVUTIL_INTFLOAT_H\n\n#include <stdint.h>\n#include \"attributes.h\"\n\nunion av_intfloat32 {\n    uint32_t i;\n    float    f;\n};\n\nunion av_intfloat64 {\n    uint64_t i;\n    double   f;\n};\n\n/**\n * Reinterpret a 32-bit integer as a float.\n */\nstatic av_always_inline float av_int2float(uint32_t i)\n{\n    union av_intfloat32 v;\n    v.i = i;\n    return v.f;\n}\n\n/**\n * Reinterpret a float as a 32-bit integer.\n */\nstatic av_always_inline uint32_t av_float2int(float f)\n{\n    union av_intfloat32 v;\n    v.f = f;\n    return v.i;\n}\n\n/**\n * Reinterpret a 64-bit integer as a double.\n */\nstatic av_always_inline double av_int2double(uint64_t i)\n{\n    union av_intfloat64 v;\n    v.i = i;\n    return v.f;\n}\n\n/**\n * Reinterpret a double as a 64-bit integer.\n */\nstatic av_always_inline uint64_t av_double2int(double f)\n{\n    union av_intfloat64 v;\n    v.f = f;\n    return v.i;\n}\n\n#endif /* AVUTIL_INTFLOAT_H */\n"
  },
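  {
    "path": "docs/examples/intfloat_example.c",
    "content": "/*\n * Illustrative example added for documentation purposes; NOT part of FFmpeg\n * or of the original project sources. A minimal sketch that round-trips a\n * float through its IEEE-754 bit pattern with av_float2int()/av_int2float()\n * from libavutil/intfloat.h.\n */\n#include <stdio.h>\n#include <libavutil/intfloat.h>\n\nint main(void)\n{\n    uint32_t bits = av_float2int(1.0f);\n\n    /* 1.0f is 0x3F800000 in IEEE-754 single precision. */\n    printf(\"bits of 1.0f: 0x%08X\\n\", (unsigned)bits);\n    printf(\"back to float: %f\\n\", av_int2float(bits));\n    return 0;\n}\n"
  },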
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/intfloat_readwrite.h",
    "content": "/*\n * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_INTFLOAT_READWRITE_H\n#define AVUTIL_INTFLOAT_READWRITE_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n#if FF_API_INTFLOAT\n/* IEEE 80 bits extended float */\ntypedef struct AVExtFloat  {\n    uint8_t exponent[2];\n    uint8_t mantissa[8];\n} AVExtFloat;\n\nattribute_deprecated double av_int2dbl(int64_t v) av_const;\nattribute_deprecated float av_int2flt(int32_t v) av_const;\nattribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const;\nattribute_deprecated int64_t av_dbl2int(double d) av_const;\nattribute_deprecated int32_t av_flt2int(float d) av_const;\nattribute_deprecated AVExtFloat av_dbl2ext(double d) av_const;\n#endif /* FF_API_INTFLOAT */\n\n#endif /* AVUTIL_INTFLOAT_READWRITE_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/intreadwrite.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_INTREADWRITE_H\n#define AVUTIL_INTREADWRITE_H\n\n#include <stdint.h>\n#include \"libavutil/avconfig.h\"\n#include \"attributes.h\"\n#include \"bswap.h\"\n\ntypedef union {\n    uint64_t u64;\n    uint32_t u32[2];\n    uint16_t u16[4];\n    uint8_t  u8 [8];\n    double   f64;\n    float    f32[2];\n} av_alias av_alias64;\n\ntypedef union {\n    uint32_t u32;\n    uint16_t u16[2];\n    uint8_t  u8 [4];\n    float    f32;\n} av_alias av_alias32;\n\ntypedef union {\n    uint16_t u16;\n    uint8_t  u8 [2];\n} av_alias av_alias16;\n\n/*\n * Arch-specific headers can provide any combination of\n * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.\n * Preprocessor symbols must be defined, even if these are implemented\n * as inline functions.\n */\n\n#ifdef HAVE_AV_CONFIG_H\n\n#include \"config.h\"\n\n#if   ARCH_ARM\n#   include \"arm/intreadwrite.h\"\n#elif ARCH_AVR32\n#   include \"avr32/intreadwrite.h\"\n#elif ARCH_MIPS\n#   include \"mips/intreadwrite.h\"\n#elif ARCH_PPC\n#   include \"ppc/intreadwrite.h\"\n#elif ARCH_TOMI\n#   include \"tomi/intreadwrite.h\"\n#elif ARCH_X86\n#   include \"x86/intreadwrite.h\"\n#endif\n\n#endif /* HAVE_AV_CONFIG_H */\n\n/*\n * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.\n */\n\n#if AV_HAVE_BIGENDIAN\n\n#   if    defined(AV_RN16) && !defined(AV_RB16)\n#       define AV_RB16(p) AV_RN16(p)\n#   elif !defined(AV_RN16) &&  defined(AV_RB16)\n#       define AV_RN16(p) AV_RB16(p)\n#   endif\n\n#   if    defined(AV_WN16) && !defined(AV_WB16)\n#       define AV_WB16(p, v) AV_WN16(p, v)\n#   elif !defined(AV_WN16) &&  defined(AV_WB16)\n#       define AV_WN16(p, v) AV_WB16(p, v)\n#   endif\n\n#   if    defined(AV_RN24) && !defined(AV_RB24)\n#       define AV_RB24(p) AV_RN24(p)\n#   elif !defined(AV_RN24) &&  defined(AV_RB24)\n#       define AV_RN24(p) AV_RB24(p)\n#   endif\n\n#   if    defined(AV_WN24) && !defined(AV_WB24)\n#       define AV_WB24(p, v) AV_WN24(p, v)\n#   elif !defined(AV_WN24) &&  defined(AV_WB24)\n#       define AV_WN24(p, v) AV_WB24(p, v)\n#   endif\n\n#   if    defined(AV_RN32) && !defined(AV_RB32)\n#       define AV_RB32(p) AV_RN32(p)\n#   elif !defined(AV_RN32) &&  defined(AV_RB32)\n#       define AV_RN32(p) AV_RB32(p)\n#   endif\n\n#   if    defined(AV_WN32) && !defined(AV_WB32)\n#       define AV_WB32(p, v) AV_WN32(p, v)\n#   elif !defined(AV_WN32) &&  defined(AV_WB32)\n#       define AV_WN32(p, v) AV_WB32(p, v)\n#   endif\n\n#   if    defined(AV_RN48) && !defined(AV_RB48)\n#       define AV_RB48(p) AV_RN48(p)\n#   elif !defined(AV_RN48) &&  defined(AV_RB48)\n#       define AV_RN48(p) AV_RB48(p)\n#   endif\n\n#   if    defined(AV_WN48) && !defined(AV_WB48)\n#       define AV_WB48(p, v) AV_WN48(p, v)\n#  
 elif !defined(AV_WN48) &&  defined(AV_WB48)\n#       define AV_WN48(p, v) AV_WB48(p, v)\n#   endif\n\n#   if    defined(AV_RN64) && !defined(AV_RB64)\n#       define AV_RB64(p) AV_RN64(p)\n#   elif !defined(AV_RN64) &&  defined(AV_RB64)\n#       define AV_RN64(p) AV_RB64(p)\n#   endif\n\n#   if    defined(AV_WN64) && !defined(AV_WB64)\n#       define AV_WB64(p, v) AV_WN64(p, v)\n#   elif !defined(AV_WN64) &&  defined(AV_WB64)\n#       define AV_WN64(p, v) AV_WB64(p, v)\n#   endif\n\n#else /* AV_HAVE_BIGENDIAN */\n\n#   if    defined(AV_RN16) && !defined(AV_RL16)\n#       define AV_RL16(p) AV_RN16(p)\n#   elif !defined(AV_RN16) &&  defined(AV_RL16)\n#       define AV_RN16(p) AV_RL16(p)\n#   endif\n\n#   if    defined(AV_WN16) && !defined(AV_WL16)\n#       define AV_WL16(p, v) AV_WN16(p, v)\n#   elif !defined(AV_WN16) &&  defined(AV_WL16)\n#       define AV_WN16(p, v) AV_WL16(p, v)\n#   endif\n\n#   if    defined(AV_RN24) && !defined(AV_RL24)\n#       define AV_RL24(p) AV_RN24(p)\n#   elif !defined(AV_RN24) &&  defined(AV_RL24)\n#       define AV_RN24(p) AV_RL24(p)\n#   endif\n\n#   if    defined(AV_WN24) && !defined(AV_WL24)\n#       define AV_WL24(p, v) AV_WN24(p, v)\n#   elif !defined(AV_WN24) &&  defined(AV_WL24)\n#       define AV_WN24(p, v) AV_WL24(p, v)\n#   endif\n\n#   if    defined(AV_RN32) && !defined(AV_RL32)\n#       define AV_RL32(p) AV_RN32(p)\n#   elif !defined(AV_RN32) &&  defined(AV_RL32)\n#       define AV_RN32(p) AV_RL32(p)\n#   endif\n\n#   if    defined(AV_WN32) && !defined(AV_WL32)\n#       define AV_WL32(p, v) AV_WN32(p, v)\n#   elif !defined(AV_WN32) &&  defined(AV_WL32)\n#       define AV_WN32(p, v) AV_WL32(p, v)\n#   endif\n\n#   if    defined(AV_RN48) && !defined(AV_RL48)\n#       define AV_RL48(p) AV_RN48(p)\n#   elif !defined(AV_RN48) &&  defined(AV_RL48)\n#       define AV_RN48(p) AV_RL48(p)\n#   endif\n\n#   if    defined(AV_WN48) && !defined(AV_WL48)\n#       define AV_WL48(p, v) AV_WN48(p, v)\n#   elif !defined(AV_WN48) &&  defined(AV_WL48)\n#       define AV_WN48(p, v) AV_WL48(p, v)\n#   endif\n\n#   if    defined(AV_RN64) && !defined(AV_RL64)\n#       define AV_RL64(p) AV_RN64(p)\n#   elif !defined(AV_RN64) &&  defined(AV_RL64)\n#       define AV_RN64(p) AV_RL64(p)\n#   endif\n\n#   if    defined(AV_WN64) && !defined(AV_WL64)\n#       define AV_WL64(p, v) AV_WN64(p, v)\n#   elif !defined(AV_WN64) &&  defined(AV_WL64)\n#       define AV_WN64(p, v) AV_WL64(p, v)\n#   endif\n\n#endif /* !AV_HAVE_BIGENDIAN */\n\n/*\n * Define AV_[RW]N helper macros to simplify definitions not provided\n * by per-arch headers.\n */\n\n#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)\n\nunion unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;\nunion unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;\nunion unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;\n\n#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)\n#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))\n\n#elif defined(__DECC)\n\n#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))\n#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))\n\n#elif AV_HAVE_FAST_UNALIGNED\n\n#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)\n#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))\n\n#else\n\n#ifndef AV_RB16\n#   define AV_RB16(x)                           \\\n    ((((const uint8_t*)(x))[0] << 8) |          \\\n      ((const uint8_t*)(x))[1])\n#endif\n#ifndef AV_WB16\n#   define AV_WB16(p, darg) do {                
\\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[1] = (d);               \\\n        ((uint8_t*)(p))[0] = (d)>>8;            \\\n    } while(0)\n#endif\n\n#ifndef AV_RL16\n#   define AV_RL16(x)                           \\\n    ((((const uint8_t*)(x))[1] << 8) |          \\\n      ((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL16\n#   define AV_WL16(p, darg) do {                \\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n    } while(0)\n#endif\n\n#ifndef AV_RB32\n#   define AV_RB32(x)                                \\\n    (((uint32_t)((const uint8_t*)(x))[0] << 24) |    \\\n               (((const uint8_t*)(x))[1] << 16) |    \\\n               (((const uint8_t*)(x))[2] <<  8) |    \\\n                ((const uint8_t*)(x))[3])\n#endif\n#ifndef AV_WB32\n#   define AV_WB32(p, darg) do {                \\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[3] = (d);               \\\n        ((uint8_t*)(p))[2] = (d)>>8;            \\\n        ((uint8_t*)(p))[1] = (d)>>16;           \\\n        ((uint8_t*)(p))[0] = (d)>>24;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL32\n#   define AV_RL32(x)                                \\\n    (((uint32_t)((const uint8_t*)(x))[3] << 24) |    \\\n               (((const uint8_t*)(x))[2] << 16) |    \\\n               (((const uint8_t*)(x))[1] <<  8) |    \\\n                ((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL32\n#   define AV_WL32(p, darg) do {                \\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n        ((uint8_t*)(p))[3] = (d)>>24;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RB64\n#   define AV_RB64(x)                                   \\\n    (((uint64_t)((const uint8_t*)(x))[0] << 56) |       \\\n     ((uint64_t)((const uint8_t*)(x))[1] << 48) |       \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 40) |       \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 32) |       \\\n     ((uint64_t)((const uint8_t*)(x))[4] << 24) |       \\\n     ((uint64_t)((const uint8_t*)(x))[5] << 16) |       \\\n     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |       \\\n      (uint64_t)((const uint8_t*)(x))[7])\n#endif\n#ifndef AV_WB64\n#   define AV_WB64(p, darg) do {                \\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[7] = (d);               \\\n        ((uint8_t*)(p))[6] = (d)>>8;            \\\n        ((uint8_t*)(p))[5] = (d)>>16;           \\\n        ((uint8_t*)(p))[4] = (d)>>24;           \\\n        ((uint8_t*)(p))[3] = (d)>>32;           \\\n        ((uint8_t*)(p))[2] = (d)>>40;           \\\n        ((uint8_t*)(p))[1] = (d)>>48;           \\\n        ((uint8_t*)(p))[0] = (d)>>56;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL64\n#   define AV_RL64(x)                                   \\\n    (((uint64_t)((const uint8_t*)(x))[7] << 56) |       \\\n     ((uint64_t)((const uint8_t*)(x))[6] << 48) |       \\\n     ((uint64_t)((const uint8_t*)(x))[5] << 40) |       \\\n     ((uint64_t)((const uint8_t*)(x))[4] << 32) |       \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 24) |       \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 16) |       \\\n     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |       \\\n      (uint64_t)((const 
uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL64\n#   define AV_WL64(p, darg) do {                \\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n        ((uint8_t*)(p))[3] = (d)>>24;           \\\n        ((uint8_t*)(p))[4] = (d)>>32;           \\\n        ((uint8_t*)(p))[5] = (d)>>40;           \\\n        ((uint8_t*)(p))[6] = (d)>>48;           \\\n        ((uint8_t*)(p))[7] = (d)>>56;           \\\n    } while(0)\n#endif\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_RN(s, p)    AV_RB##s(p)\n#   define AV_WN(s, p, v) AV_WB##s(p, v)\n#else\n#   define AV_RN(s, p)    AV_RL##s(p)\n#   define AV_WN(s, p, v) AV_WL##s(p, v)\n#endif\n\n#endif /* HAVE_FAST_UNALIGNED */\n\n#ifndef AV_RN16\n#   define AV_RN16(p) AV_RN(16, p)\n#endif\n\n#ifndef AV_RN32\n#   define AV_RN32(p) AV_RN(32, p)\n#endif\n\n#ifndef AV_RN64\n#   define AV_RN64(p) AV_RN(64, p)\n#endif\n\n#ifndef AV_WN16\n#   define AV_WN16(p, v) AV_WN(16, p, v)\n#endif\n\n#ifndef AV_WN32\n#   define AV_WN32(p, v) AV_WN(32, p, v)\n#endif\n\n#ifndef AV_WN64\n#   define AV_WN64(p, v) AV_WN(64, p, v)\n#endif\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_RB(s, p)    AV_RN##s(p)\n#   define AV_WB(s, p, v) AV_WN##s(p, v)\n#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))\n#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))\n#else\n#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))\n#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))\n#   define AV_RL(s, p)    AV_RN##s(p)\n#   define AV_WL(s, p, v) AV_WN##s(p, v)\n#endif\n\n#define AV_RB8(x)     (((const uint8_t*)(x))[0])\n#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)\n\n#define AV_RL8(x)     AV_RB8(x)\n#define AV_WL8(p, d)  AV_WB8(p, d)\n\n#ifndef AV_RB16\n#   define AV_RB16(p)    AV_RB(16, p)\n#endif\n#ifndef AV_WB16\n#   define AV_WB16(p, v) AV_WB(16, p, v)\n#endif\n\n#ifndef AV_RL16\n#   define AV_RL16(p)    AV_RL(16, p)\n#endif\n#ifndef AV_WL16\n#   define AV_WL16(p, v) AV_WL(16, p, v)\n#endif\n\n#ifndef AV_RB32\n#   define AV_RB32(p)    AV_RB(32, p)\n#endif\n#ifndef AV_WB32\n#   define AV_WB32(p, v) AV_WB(32, p, v)\n#endif\n\n#ifndef AV_RL32\n#   define AV_RL32(p)    AV_RL(32, p)\n#endif\n#ifndef AV_WL32\n#   define AV_WL32(p, v) AV_WL(32, p, v)\n#endif\n\n#ifndef AV_RB64\n#   define AV_RB64(p)    AV_RB(64, p)\n#endif\n#ifndef AV_WB64\n#   define AV_WB64(p, v) AV_WB(64, p, v)\n#endif\n\n#ifndef AV_RL64\n#   define AV_RL64(p)    AV_RL(64, p)\n#endif\n#ifndef AV_WL64\n#   define AV_WL64(p, v) AV_WL(64, p, v)\n#endif\n\n#ifndef AV_RB24\n#   define AV_RB24(x)                           \\\n    ((((const uint8_t*)(x))[0] << 16) |         \\\n     (((const uint8_t*)(x))[1] <<  8) |         \\\n      ((const uint8_t*)(x))[2])\n#endif\n#ifndef AV_WB24\n#   define AV_WB24(p, d) do {                   \\\n        ((uint8_t*)(p))[2] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[0] = (d)>>16;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL24\n#   define AV_RL24(x)                           \\\n    ((((const uint8_t*)(x))[2] << 16) |         \\\n     (((const uint8_t*)(x))[1] <<  8) |         \\\n      ((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL24\n#   define AV_WL24(p, d) do {                   \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n    } 
while(0)\n#endif\n\n#ifndef AV_RB48\n#   define AV_RB48(x)                                     \\\n    (((uint64_t)((const uint8_t*)(x))[0] << 40) |         \\\n     ((uint64_t)((const uint8_t*)(x))[1] << 32) |         \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 24) |         \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 16) |         \\\n     ((uint64_t)((const uint8_t*)(x))[4] <<  8) |         \\\n      (uint64_t)((const uint8_t*)(x))[5])\n#endif\n#ifndef AV_WB48\n#   define AV_WB48(p, darg) do {                \\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[5] = (d);               \\\n        ((uint8_t*)(p))[4] = (d)>>8;            \\\n        ((uint8_t*)(p))[3] = (d)>>16;           \\\n        ((uint8_t*)(p))[2] = (d)>>24;           \\\n        ((uint8_t*)(p))[1] = (d)>>32;           \\\n        ((uint8_t*)(p))[0] = (d)>>40;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL48\n#   define AV_RL48(x)                                     \\\n    (((uint64_t)((const uint8_t*)(x))[5] << 40) |         \\\n     ((uint64_t)((const uint8_t*)(x))[4] << 32) |         \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 24) |         \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 16) |         \\\n     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |         \\\n      (uint64_t)((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL48\n#   define AV_WL48(p, darg) do {                \\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n        ((uint8_t*)(p))[3] = (d)>>24;           \\\n        ((uint8_t*)(p))[4] = (d)>>32;           \\\n        ((uint8_t*)(p))[5] = (d)>>40;           \\\n    } while(0)\n#endif\n\n/*\n * The AV_[RW]NA macros access naturally aligned data\n * in a type-safe way.\n */\n\n#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)\n#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))\n\n#ifndef AV_RN16A\n#   define AV_RN16A(p) AV_RNA(16, p)\n#endif\n\n#ifndef AV_RN32A\n#   define AV_RN32A(p) AV_RNA(32, p)\n#endif\n\n#ifndef AV_RN64A\n#   define AV_RN64A(p) AV_RNA(64, p)\n#endif\n\n#ifndef AV_WN16A\n#   define AV_WN16A(p, v) AV_WNA(16, p, v)\n#endif\n\n#ifndef AV_WN32A\n#   define AV_WN32A(p, v) AV_WNA(32, p, v)\n#endif\n\n#ifndef AV_WN64A\n#   define AV_WN64A(p, v) AV_WNA(64, p, v)\n#endif\n\n/*\n * The AV_COPYxxU macros are suitable for copying data to/from unaligned\n * memory locations.\n */\n\n#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));\n\n#ifndef AV_COPY16U\n#   define AV_COPY16U(d, s) AV_COPYU(16, d, s)\n#endif\n\n#ifndef AV_COPY32U\n#   define AV_COPY32U(d, s) AV_COPYU(32, d, s)\n#endif\n\n#ifndef AV_COPY64U\n#   define AV_COPY64U(d, s) AV_COPYU(64, d, s)\n#endif\n\n#ifndef AV_COPY128U\n#   define AV_COPY128U(d, s)                                    \\\n    do {                                                        \\\n        AV_COPY64U(d, s);                                       \\\n        AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8);     \\\n    } while(0)\n#endif\n\n/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be\n * naturally aligned. 
They may be implemented using MMX,\n * so emms_c() must be called before using any float code\n * afterwards.\n */\n\n#define AV_COPY(n, d, s) \\\n    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)\n\n#ifndef AV_COPY16\n#   define AV_COPY16(d, s) AV_COPY(16, d, s)\n#endif\n\n#ifndef AV_COPY32\n#   define AV_COPY32(d, s) AV_COPY(32, d, s)\n#endif\n\n#ifndef AV_COPY64\n#   define AV_COPY64(d, s) AV_COPY(64, d, s)\n#endif\n\n#ifndef AV_COPY128\n#   define AV_COPY128(d, s)                    \\\n    do {                                       \\\n        AV_COPY64(d, s);                       \\\n        AV_COPY64((char*)(d)+8, (char*)(s)+8); \\\n    } while(0)\n#endif\n\n#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))\n\n#ifndef AV_SWAP64\n#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)\n#endif\n\n#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)\n\n#ifndef AV_ZERO16\n#   define AV_ZERO16(d) AV_ZERO(16, d)\n#endif\n\n#ifndef AV_ZERO32\n#   define AV_ZERO32(d) AV_ZERO(32, d)\n#endif\n\n#ifndef AV_ZERO64\n#   define AV_ZERO64(d) AV_ZERO(64, d)\n#endif\n\n#ifndef AV_ZERO128\n#   define AV_ZERO128(d)         \\\n    do {                         \\\n        AV_ZERO64(d);            \\\n        AV_ZERO64((char*)(d)+8); \\\n    } while(0)\n#endif\n\n#endif /* AVUTIL_INTREADWRITE_H */\n"
  },
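  {
    "path": "examples/intreadwrite_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path is invented. It shows\n * the fixed-endianness AV_WLxx / AV_RBxx macros from intreadwrite.h reading\n * and writing integers in a byte buffer independently of host byte order.\n */\n#include <stdio.h>\n#include <stdint.h>\n#include <libavutil/intreadwrite.h>\n\nint main(void)\n{\n    uint8_t buf[8] = {0};\n\n    AV_WL32(buf, 0x11223344u);   /* store 32 bits little-endian */\n    AV_WB16(buf + 4, 0xBEEF);    /* store 16 bits big-endian */\n\n    printf(\"LE32 read back: 0x%08x\\n\", AV_RL32(buf));       /* 0x11223344 */\n    printf(\"BE16 read back: 0x%04x\\n\", AV_RB16(buf + 4));   /* 0xbeef */\n    printf(\"buf[0] = 0x%02x (LSB stored first)\\n\", buf[0]); /* 0x44 */\n    return 0;\n}\n"
  },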
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/lfg.h",
    "content": "/*\n * Lagged Fibonacci PRNG\n * Copyright (c) 2008 Michael Niedermayer\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_LFG_H\n#define AVUTIL_LFG_H\n\ntypedef struct AVLFG {\n    unsigned int state[64];\n    int index;\n} AVLFG;\n\nvoid av_lfg_init(AVLFG *c, unsigned int seed);\n\n/**\n * Get the next random unsigned 32-bit number using an ALFG.\n *\n * Please also consider a simple LCG like state= state*1664525+1013904223,\n * it may be good enough and faster for your specific use case.\n */\nstatic inline unsigned int av_lfg_get(AVLFG *c){\n    c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63];\n    return c->state[c->index++ & 63];\n}\n\n/**\n * Get the next random unsigned 32-bit number using a MLFG.\n *\n * Please also consider av_lfg_get() above, it is faster.\n */\nstatic inline unsigned int av_mlfg_get(AVLFG *c){\n    unsigned int a= c->state[(c->index-55) & 63];\n    unsigned int b= c->state[(c->index-24) & 63];\n    return c->state[c->index++ & 63] = 2*a*b+a+b;\n}\n\n/**\n * Get the next two numbers generated by a Box-Muller Gaussian\n * generator using the random numbers issued by lfg.\n *\n * @param out array where the two generated numbers are placed\n */\nvoid av_bmg_get(AVLFG *lfg, double out[2]);\n\n#endif /* AVUTIL_LFG_H */\n"
  },
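  {
    "path": "examples/lfg_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path is invented. It seeds\n * an AVLFG, draws a few 32-bit values with av_lfg_get(), and gets a pair of\n * Gaussian samples via av_bmg_get() (link against libavutil).\n */\n#include <stdio.h>\n#include <libavutil/lfg.h>\n\nint main(void)\n{\n    AVLFG lfg;\n    double gauss[2];\n    int i;\n\n    av_lfg_init(&lfg, 42);                 /* fixed seed -> reproducible */\n    for (i = 0; i < 4; i++)\n        printf(\"u%d = %u\\n\", i, av_lfg_get(&lfg));\n\n    av_bmg_get(&lfg, gauss);               /* Box-Muller pair */\n    printf(\"gauss = %f, %f\\n\", gauss[0], gauss[1]);\n    return 0;\n}\n"
  },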
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/log.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_LOG_H\n#define AVUTIL_LOG_H\n\n#include <stdarg.h>\n#include \"avutil.h\"\n#include \"attributes.h\"\n\ntypedef enum {\n    AV_CLASS_CATEGORY_NA = 0,\n    AV_CLASS_CATEGORY_INPUT,\n    AV_CLASS_CATEGORY_OUTPUT,\n    AV_CLASS_CATEGORY_MUXER,\n    AV_CLASS_CATEGORY_DEMUXER,\n    AV_CLASS_CATEGORY_ENCODER,\n    AV_CLASS_CATEGORY_DECODER,\n    AV_CLASS_CATEGORY_FILTER,\n    AV_CLASS_CATEGORY_BITSTREAM_FILTER,\n    AV_CLASS_CATEGORY_SWSCALER,\n    AV_CLASS_CATEGORY_SWRESAMPLER,\n    AV_CLASS_CATEGORY_NB, ///< not part of ABI/API\n}AVClassCategory;\n\nstruct AVOptionRanges;\n\n/**\n * Describe the class of an AVClass context structure. That is an\n * arbitrary struct of which the first field is a pointer to an\n * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).\n */\ntypedef struct AVClass {\n    /**\n     * The name of the class; usually it is the same name as the\n     * context structure type to which the AVClass is associated.\n     */\n    const char* class_name;\n\n    /**\n     * A pointer to a function which returns the name of a context\n     * instance ctx associated with the class.\n     */\n    const char* (*item_name)(void* ctx);\n\n    /**\n     * a pointer to the first option specified in the class if any or NULL\n     *\n     * @see av_set_default_options()\n     */\n    const struct AVOption *option;\n\n    /**\n     * LIBAVUTIL_VERSION with which this structure was created.\n     * This is used to allow fields to be added without requiring major\n     * version bumps everywhere.\n     */\n\n    int version;\n\n    /**\n     * Offset in the structure where log_level_offset is stored.\n     * 0 means there is no such variable\n     */\n    int log_level_offset_offset;\n\n    /**\n     * Offset in the structure where a pointer to the parent context for\n     * logging is stored. 
For example a decoder could pass its AVCodecContext\n     * to eval as such a parent context, which an av_log() implementation\n     * could then leverage to display the parent context.\n     * The offset can be NULL.\n     */\n    int parent_log_context_offset;\n\n    /**\n     * Return next AVOptions-enabled child or NULL\n     */\n    void* (*child_next)(void *obj, void *prev);\n\n    /**\n     * Return an AVClass corresponding to the next potential\n     * AVOptions-enabled child.\n     *\n     * The difference between child_next and this is that\n     * child_next iterates over _already existing_ objects, while\n     * child_class_next iterates over _all possible_ children.\n     */\n    const struct AVClass* (*child_class_next)(const struct AVClass *prev);\n\n    /**\n     * Category used for visualization (like color)\n     * This is only set if the category is equal for all objects using this class.\n     * available since version (51 << 16 | 56 << 8 | 100)\n     */\n    AVClassCategory category;\n\n    /**\n     * Callback to return the category.\n     * available since version (51 << 16 | 59 << 8 | 100)\n     */\n    AVClassCategory (*get_category)(void* ctx);\n\n    /**\n     * Callback to return the supported/allowed ranges.\n     * available since version (52.12)\n     */\n    int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);\n} AVClass;\n\n/**\n * @addtogroup lavu_log\n *\n * @{\n *\n * @defgroup lavu_log_constants Logging Constants\n *\n * @{\n */\n\n/**\n * Print no output.\n */\n#define AV_LOG_QUIET    -8\n\n/**\n * Something went really wrong and we will crash now.\n */\n#define AV_LOG_PANIC     0\n\n/**\n * Something went wrong and recovery is not possible.\n * For example, no header was found for a format which depends\n * on headers or an illegal combination of parameters is used.\n */\n#define AV_LOG_FATAL     8\n\n/**\n * Something went wrong and cannot losslessly be recovered.\n * However, not all future data is affected.\n */\n#define AV_LOG_ERROR    16\n\n/**\n * Something somehow does not look correct. This may or may not\n * lead to problems. An example would be the use of '-vstrict -2'.\n */\n#define AV_LOG_WARNING  24\n\n/**\n * Standard information.\n */\n#define AV_LOG_INFO     32\n\n/**\n * Detailed information.\n */\n#define AV_LOG_VERBOSE  40\n\n/**\n * Stuff which is only useful for libav* developers.\n */\n#define AV_LOG_DEBUG    48\n\n#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET)\n\n/**\n * @}\n */\n\n/**\n * Send the specified message to the log if the level is less than or equal\n * to the current av_log_level. By default, all logging messages are sent to\n * stderr. This behavior can be altered by setting a different logging callback\n * function.\n * @see av_log_set_callback\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n *        pointer to an AVClass struct.\n * @param level The importance level of the message expressed using a @ref\n *        lavu_log_constants \"Logging Constant\".\n * @param fmt The format string (printf-compatible) that specifies how\n *        subsequent arguments are converted to output.\n */\nvoid av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);\n\n\n/**\n * Send the specified message to the log if the level is less than or equal\n * to the current av_log_level. By default, all logging messages are sent to\n * stderr. 
This behavior can be altered by setting a different logging callback\n * function.\n * @see av_log_set_callback\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n *        pointer to an AVClass struct.\n * @param level The importance level of the message expressed using a @ref\n *        lavu_log_constants \"Logging Constant\".\n * @param fmt The format string (printf-compatible) that specifies how\n *        subsequent arguments are converted to output.\n * @param vl The arguments referenced by the format string.\n */\nvoid av_vlog(void *avcl, int level, const char *fmt, va_list vl);\n\n/**\n * Get the current log level\n *\n * @see lavu_log_constants\n *\n * @return Current log level\n */\nint av_log_get_level(void);\n\n/**\n * Set the log level\n *\n * @see lavu_log_constants\n *\n * @param level Logging level\n */\nvoid av_log_set_level(int level);\n\n/**\n * Set the logging callback\n *\n * @note The callback must be thread safe, even if the application does not use\n *       threads itself as some codecs are multithreaded.\n *\n * @see av_log_default_callback\n *\n * @param callback A logging function with a compatible signature.\n */\nvoid av_log_set_callback(void (*callback)(void*, int, const char*, va_list));\n\n/**\n * Default logging callback\n *\n * It prints the message to stderr, optionally colorizing it.\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n *        pointer to an AVClass struct.\n * @param level The importance level of the message expressed using a @ref\n *        lavu_log_constants \"Logging Constant\".\n * @param fmt The format string (printf-compatible) that specifies how\n *        subsequent arguments are converted to output.\n * @param vl The arguments referenced by the format string.\n */\nvoid av_log_default_callback(void *avcl, int level, const char *fmt,\n                             va_list vl);\n\n/**\n * Return the context name\n *\n * @param  ctx The AVClass context\n *\n * @return The AVClass class_name\n */\nconst char* av_default_item_name(void* ctx);\nAVClassCategory av_default_get_category(void *ptr);\n\n/**\n * Format a line of log output the same way as the default callback.\n * @param line          buffer to receive the formatted line\n * @param line_size     size of the buffer\n * @param print_prefix  used to store whether the prefix must be printed;\n *                      must point to a persistent integer initially set to 1\n */\nvoid av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,\n                        char *line, int line_size, int *print_prefix);\n\n/**\n * av_dlog macros\n * Useful to print debug messages that shouldn't get compiled in normally.\n */\n\n#ifdef DEBUG\n#    define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)\n#else\n#    define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)\n#endif\n\n/**\n * Skip repeated messages. This requires the user app to use av_log() instead of\n * (f)printf, as the two would otherwise interfere and, with some bad luck, lead to\n * \"Last message repeated x times\" messages below (f)printf messages.\n * Also, to receive the final \"last repeated\" line, if any, the user app must\n * call av_log(NULL, AV_LOG_QUIET, \"%s\", \"\"); at the end.\n */\n#define AV_LOG_SKIP_REPEATED 1\nvoid av_log_set_flags(int arg);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_LOG_H */\n"
  },
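  {
    "path": "examples/log_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path and callback name are\n * invented. It lowers the global log level and installs a custom callback\n * with av_log_set_callback(). Note the header requires the callback to be\n * thread safe; this sketch ignores that for brevity.\n */\n#include <stdio.h>\n#include <stdarg.h>\n#include <libavutil/log.h>\n\nstatic void my_log_callback(void *avcl, int level, const char *fmt, va_list vl)\n{\n    (void)avcl;\n    if (level > av_log_get_level())   /* drop messages more verbose than the level */\n        return;\n    fprintf(stderr, \"[app] \");\n    vfprintf(stderr, fmt, vl);\n}\n\nint main(void)\n{\n    av_log_set_level(AV_LOG_WARNING);\n    av_log_set_callback(my_log_callback);\n\n    av_log(NULL, AV_LOG_WARNING, \"shown: WARNING <= current level\\n\");\n    av_log(NULL, AV_LOG_DEBUG,   \"hidden: DEBUG is more verbose\\n\");\n    return 0;\n}\n"
  },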
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/lzo.h",
    "content": "/*\n * LZO 1x decompression\n * copyright (c) 2006 Reimar Doeffinger\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_LZO_H\n#define AVUTIL_LZO_H\n\n/**\n * @defgroup lavu_lzo LZO\n * @ingroup lavu_crypto\n *\n * @{\n */\n\n#include <stdint.h>\n\n/** @name Error flags returned by av_lzo1x_decode\n * @{ */\n/// end of the input buffer reached before decoding finished\n#define AV_LZO_INPUT_DEPLETED  1\n/// decoded data did not fit into output buffer\n#define AV_LZO_OUTPUT_FULL     2\n/// a reference to previously decoded data was wrong\n#define AV_LZO_INVALID_BACKPTR 4\n/// a non-specific error in the compressed bitstream\n#define AV_LZO_ERROR           8\n/** @} */\n\n#define AV_LZO_INPUT_PADDING   8\n#define AV_LZO_OUTPUT_PADDING 12\n\n/**\n * @brief Decodes LZO 1x compressed data.\n * @param out output buffer\n * @param outlen size of output buffer, number of bytes left are returned here\n * @param in input buffer\n * @param inlen size of input buffer, number of bytes left are returned here\n * @return 0 on success, otherwise a combination of the error flags above\n *\n * Make sure all buffers are appropriately padded, in must provide\n * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes.\n */\nint av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_LZO_H */\n"
  },
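  {
    "path": "examples/lzo_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path is invented. It shows\n * the calling convention of av_lzo1x_decode(), including the padding the\n * header requires. The input bytes are placeholders, so a real run is\n * expected to report error flags; the point is the buffer setup.\n */\n#include <stdio.h>\n#include <string.h>\n#include <stdint.h>\n#include <libavutil/lzo.h>\n\nint main(void)\n{\n    uint8_t in[16 + AV_LZO_INPUT_PADDING];   /* payload + required padding */\n    uint8_t out[64 + AV_LZO_OUTPUT_PADDING];\n    int inlen = 16, outlen = 64;             /* sizes excluding the padding */\n    int err;\n\n    memset(in, 0, sizeof(in));               /* placeholder \"compressed\" data */\n\n    err = av_lzo1x_decode(out, &outlen, in, &inlen);\n    if (err) {\n        if (err & AV_LZO_INPUT_DEPLETED)  fprintf(stderr, \"input ran out\\n\");\n        if (err & AV_LZO_OUTPUT_FULL)     fprintf(stderr, \"output full\\n\");\n        if (err & AV_LZO_INVALID_BACKPTR) fprintf(stderr, \"bad back-reference\\n\");\n        if (err & AV_LZO_ERROR)           fprintf(stderr, \"stream error\\n\");\n        return 1;\n    }\n    printf(\"decoded %d bytes\\n\", 64 - outlen); /* outlen holds bytes left */\n    return 0;\n}\n"
  },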
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/macros.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * @ingroup lavu\n * Utility Preprocessor macros\n */\n\n#ifndef AVUTIL_MACROS_H\n#define AVUTIL_MACROS_H\n\n/**\n * @addtogroup preproc_misc Preprocessor String Macros\n *\n * String manipulation macros\n *\n * @{\n */\n\n#define AV_STRINGIFY(s)         AV_TOSTRING(s)\n#define AV_TOSTRING(s) #s\n\n#define AV_GLUE(a, b) a ## b\n#define AV_JOIN(a, b) AV_GLUE(a, b)\n\n/**\n * @}\n */\n\n#define AV_PRAGMA(s) _Pragma(#s)\n\n#endif /* AVUTIL_MACROS_H */\n"
  },
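  {
    "path": "examples/macros_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository; the path, MY_WIDTH and make_counter\n * are invented. It exercises the stringification and token-pasting macros\n * from macros.h.\n */\n#include <stdio.h>\n#include <libavutil/macros.h>\n\n#define MY_WIDTH 640\n\n/* AV_JOIN expands its arguments before pasting, so this builds identifiers\n * like counter_a and counter_b at compile time. */\n#define make_counter(tag) static int AV_JOIN(counter_, tag) = 0\n\nmake_counter(a);\nmake_counter(b);\n\nint main(void)\n{\n    /* AV_STRINGIFY expands MY_WIDTH first, yielding \"640\", not \"MY_WIDTH\". */\n    printf(\"width as a string: %s\\n\", AV_STRINGIFY(MY_WIDTH));\n    printf(\"counters: %d %d\\n\", counter_a, counter_b);\n    return 0;\n}\n"
  },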
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/mathematics.h",
    "content": "/*\n * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_MATHEMATICS_H\n#define AVUTIL_MATHEMATICS_H\n\n#include <stdint.h>\n#include <math.h>\n#include \"attributes.h\"\n#include \"rational.h\"\n#include \"intfloat.h\"\n\n#ifndef M_E\n#define M_E            2.7182818284590452354   /* e */\n#endif\n#ifndef M_LN2\n#define M_LN2          0.69314718055994530942  /* log_e 2 */\n#endif\n#ifndef M_LN10\n#define M_LN10         2.30258509299404568402  /* log_e 10 */\n#endif\n#ifndef M_LOG2_10\n#define M_LOG2_10      3.32192809488736234787  /* log_2 10 */\n#endif\n#ifndef M_PHI\n#define M_PHI          1.61803398874989484820   /* phi / golden ratio */\n#endif\n#ifndef M_PI\n#define M_PI           3.14159265358979323846  /* pi */\n#endif\n#ifndef M_PI_2\n#define M_PI_2         1.57079632679489661923  /* pi/2 */\n#endif\n#ifndef M_SQRT1_2\n#define M_SQRT1_2      0.70710678118654752440  /* 1/sqrt(2) */\n#endif\n#ifndef M_SQRT2\n#define M_SQRT2        1.41421356237309504880  /* sqrt(2) */\n#endif\n#ifndef NAN\n#define NAN            av_int2float(0x7fc00000)\n#endif\n#ifndef INFINITY\n#define INFINITY       av_int2float(0x7f800000)\n#endif\n\n/**\n * @addtogroup lavu_math\n * @{\n */\n\n\nenum AVRounding {\n    AV_ROUND_ZERO     = 0, ///< Round toward zero.\n    AV_ROUND_INF      = 1, ///< Round away from zero.\n    AV_ROUND_DOWN     = 2, ///< Round toward -infinity.\n    AV_ROUND_UP       = 3, ///< Round toward +infinity.\n    AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.\n    AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE\n};\n\n/**\n * Return the greatest common divisor of a and b.\n * If both a and b are 0 or either or both are <0 then behavior is\n * undefined.\n */\nint64_t av_const av_gcd(int64_t a, int64_t b);\n\n/**\n * Rescale a 64-bit integer with rounding to nearest.\n * A simple a*b/c isn't possible as it can overflow.\n */\nint64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;\n\n/**\n * Rescale a 64-bit integer with specified rounding.\n * A simple a*b/c isn't possible as it can overflow.\n *\n * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is\n *         INT64_MIN or INT64_MAX then a is passed through unchanged.\n */\nint64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;\n\n/**\n * Rescale a 64-bit integer by 2 rational numbers.\n */\nint64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;\n\n/**\n * Rescale a 64-bit integer by 2 rational numbers with specified rounding.\n *\n * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is\n *         INT64_MIN or INT64_MAX then 
a is passed through unchanged.\n */\nint64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,\n                         enum AVRounding) av_const;\n\n/**\n * Compare two timestamps, each in its own time base.\n * The result of the function is undefined if one of the timestamps\n * is outside the int64_t range when represented in the other's time base.\n * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b, or 0 if they represent the same position\n */\nint av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);\n\n/**\n * Compare two integers modulo mod.\n * That is, we compare integers a and b for which only the least\n * significant log2(mod) bits are known.\n *\n * @param mod must be a power of 2\n * @return a negative value if a is smaller than b\n *         a positive value if a is greater than b\n *         0                if a equals          b\n */\nint64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);\n\n/**\n * Rescale a timestamp while preserving known durations.\n *\n * @param in_ts Input timestamp\n * @param in_tb Input timebase\n * @param fs_tb Duration and *last timebase\n * @param duration duration until the next call\n * @param out_tb Output timebase\n */\nint64_t av_rescale_delta(AVRational in_tb, int64_t in_ts,  AVRational fs_tb, int duration, int64_t *last, AVRational out_tb);\n\n/**\n * Add a value to a timestamp.\n *\n * This function guarantees that when the same value is repeatedly added,\n * no accumulation of rounding errors occurs.\n *\n * @param ts Input timestamp\n * @param ts_tb Input timestamp timebase\n * @param inc value to add to ts\n * @param inc_tb inc timebase\n */\nint64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc);\n\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_MATHEMATICS_H */\n"
  },
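  {
    "path": "examples/rescale_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path is invented. It rescales\n * a timestamp between two time bases with av_rescale_q() and repeats the\n * computation with av_rescale_rnd(), the overflow-safe replacement for a\n * plain a*b/c.\n */\n#include <stdio.h>\n#include <inttypes.h>\n#include <libavutil/mathematics.h>\n\nint main(void)\n{\n    AVRational tb_90k = {1, 90000};   /* MPEG-TS style 90 kHz clock */\n    AVRational tb_ms  = {1, 1000};    /* millisecond time base */\n\n    /* 123456 ticks at 90 kHz -> milliseconds, rounded to nearest */\n    int64_t ms = av_rescale_q(123456, tb_90k, tb_ms);\n    printf(\"123456 @ 1/90000 = %\" PRId64 \" ms\\n\", ms);   /* 1372 */\n\n    /* the same computation with the rounding mode spelled out */\n    int64_t ms2 = av_rescale_rnd(123456, 1000, 90000, AV_ROUND_NEAR_INF);\n    printf(\"explicit rounding: %\" PRId64 \" ms\\n\", ms2);\n    return 0;\n}\n"
  },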
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/md5.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_MD5_H\n#define AVUTIL_MD5_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_md5 MD5\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_md5_size;\n\nstruct AVMD5;\n\n/**\n * Allocate an AVMD5 context.\n */\nstruct AVMD5 *av_md5_alloc(void);\n\n/**\n * Initialize MD5 hashing.\n *\n * @param ctx pointer to the function context (of size av_md5_size)\n */\nvoid av_md5_init(struct AVMD5 *ctx);\n\n/**\n * Update hash value.\n *\n * @param ctx hash function context\n * @param src input data to update hash with\n * @param len input data length\n */\nvoid av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param ctx hash function context\n * @param dst buffer where output digest value is stored\n */\nvoid av_md5_final(struct AVMD5 *ctx, uint8_t *dst);\n\n/**\n * Hash an array of data.\n *\n * @param dst The output buffer to write the digest into\n * @param src The data to hash\n * @param len The length of the data, in bytes\n */\nvoid av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_MD5_H */\n"
  },
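  {
    "path": "examples/md5_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path is invented. It hashes\n * the same buffer in one shot with av_md5_sum() and incrementally with the\n * alloc/init/update/final API; both digests must match. av_free() comes from\n * libavutil/mem.h.\n */\n#include <stdio.h>\n#include <string.h>\n#include <stdint.h>\n#include <libavutil/md5.h>\n#include <libavutil/mem.h>\n\nstatic void print_digest(const char *tag, const uint8_t d[16])\n{\n    int i;\n    printf(\"%s: \", tag);\n    for (i = 0; i < 16; i++)\n        printf(\"%02x\", d[i]);\n    printf(\"\\n\");\n}\n\nint main(void)\n{\n    const char *msg = \"hello drone\";\n    int len = (int)strlen(msg);\n    uint8_t one_shot[16], streamed[16];\n    struct AVMD5 *ctx = av_md5_alloc();\n\n    if (!ctx)\n        return 1;\n\n    av_md5_sum(one_shot, (const uint8_t *)msg, len);\n\n    av_md5_init(ctx);\n    av_md5_update(ctx, (const uint8_t *)msg, 5);        /* \"hello\" */\n    av_md5_update(ctx, (const uint8_t *)msg + 5, len - 5);\n    av_md5_final(ctx, streamed);\n    av_free(ctx);\n\n    print_digest(\"one-shot\", one_shot);\n    print_digest(\"streamed\", streamed);                 /* identical digest */\n    return 0;\n}\n"
  },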
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/mem.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * memory handling functions\n */\n\n#ifndef AVUTIL_MEM_H\n#define AVUTIL_MEM_H\n\n#include <limits.h>\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"error.h\"\n#include \"avutil.h\"\n\n/**\n * @addtogroup lavu_mem\n * @{\n */\n\n\n#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)\n    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v\n    #define DECLARE_ASM_CONST(n,t,v)    const t __attribute__ ((aligned (n))) v\n#elif defined(__TI_COMPILER_VERSION__)\n    #define DECLARE_ALIGNED(n,t,v)                      \\\n        AV_PRAGMA(DATA_ALIGN(v,n))                      \\\n        t __attribute__((aligned(n))) v\n    #define DECLARE_ASM_CONST(n,t,v)                    \\\n        AV_PRAGMA(DATA_ALIGN(v,n))                      \\\n        static const t __attribute__((aligned(n))) v\n#elif defined(__GNUC__)\n    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v\n    #define DECLARE_ASM_CONST(n,t,v)    static const t av_used __attribute__ ((aligned (n))) v\n#elif defined(_MSC_VER)\n    #define DECLARE_ALIGNED(n,t,v)      __declspec(align(n)) t v\n    #define DECLARE_ASM_CONST(n,t,v)    __declspec(align(n)) static const t v\n#else\n    #define DECLARE_ALIGNED(n,t,v)      t v\n    #define DECLARE_ASM_CONST(n,t,v)    static const t v\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n    #define av_malloc_attrib __attribute__((__malloc__))\n#else\n    #define av_malloc_attrib\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(4,3)\n    #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))\n#else\n    #define av_alloc_size(...)\n#endif\n\n/**\n * Allocate a block of size bytes with alignment suitable for all\n * memory accesses (including vectors if available on the CPU).\n * @param size Size in bytes for the memory block to be allocated.\n * @return Pointer to the allocated block, NULL if the block cannot\n * be allocated.\n * @see av_mallocz()\n */\nvoid *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);\n\n/**\n * Allocate a block of size * nmemb bytes with av_malloc().\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Pointer to the allocated block, NULL if the block cannot\n * be allocated.\n * @see av_malloc()\n */\nav_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size)\n{\n    if (!size || nmemb >= INT_MAX / size)\n        return NULL;\n    return av_malloc(nmemb * size);\n}\n\n/**\n * Allocate or reallocate a block of memory.\n * If ptr is NULL and size > 0, allocate a new block. 
If\n * size is zero, free the memory block pointed to by ptr.\n * @param ptr Pointer to a memory block already allocated with\n * av_realloc() or NULL.\n * @param size Size in bytes of the memory block to be allocated or\n * reallocated.\n * @return Pointer to a newly-reallocated block or NULL if the block\n * cannot be reallocated or the function is used to free the memory block.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_realloc(). The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n * @see av_fast_realloc()\n */\nvoid *av_realloc(void *ptr, size_t size) av_alloc_size(2);\n\n/**\n * Allocate or reallocate a block of memory.\n * This function does the same thing as av_realloc, except:\n * - It takes two arguments and checks the result of the multiplication for\n *   integer overflow.\n * - It frees the input block in case of failure, thus avoiding the memory\n *   leak with the classic \"buf = realloc(buf); if (!buf) return -1;\".\n */\nvoid *av_realloc_f(void *ptr, size_t nelem, size_t elsize);\n\n/**\n * Allocate or reallocate a block of memory.\n * If *ptr is NULL and size > 0, allocate a new block. If\n * size is zero, free the memory block pointed to by ptr.\n * @param   ptr Pointer to a pointer to a memory block already allocated\n *          with av_realloc(), or pointer to a pointer to NULL.\n *          The pointer is updated on success, or freed on failure.\n * @param   size Size in bytes for the memory block to be allocated or\n *          reallocated\n * @return  Zero on success, an AVERROR error code on failure.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_reallocp(). The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n */\nint av_reallocp(void *ptr, size_t size);\n\n/**\n * Allocate or reallocate an array.\n * If ptr is NULL and nmemb > 0, allocate a new block. If\n * nmemb is zero, free the memory block pointed to by ptr.\n * @param ptr Pointer to a memory block already allocated with\n * av_realloc() or NULL.\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Pointer to a newly-reallocated block or NULL if the block\n * cannot be reallocated or the function is used to free the memory block.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_realloc(). The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n */\nav_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);\n\n/**\n * Allocate or reallocate an array through a pointer to a pointer.\n * If *ptr is NULL and nmemb > 0, allocate a new block. 
If\n * nmemb is zero, free the memory block pointed to by ptr.\n * @param ptr Pointer to a pointer to a memory block already allocated\n * with av_realloc(), or pointer to a pointer to NULL.\n * The pointer is updated on success, or freed on failure.\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Zero on success, an AVERROR error code on failure.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_realloc(). The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n */\nav_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);\n\n/**\n * Free a memory block which has been allocated with av_malloc(z)() or\n * av_realloc().\n * @param ptr Pointer to the memory block which should be freed.\n * @note ptr = NULL is explicitly allowed.\n * @note It is recommended that you use av_freep() instead.\n * @see av_freep()\n */\nvoid av_free(void *ptr);\n\n/**\n * Allocate a block of size bytes with alignment suitable for all\n * memory accesses (including vectors if available on the CPU) and\n * zero all the bytes of the block.\n * @param size Size in bytes for the memory block to be allocated.\n * @return Pointer to the allocated block, NULL if it cannot be allocated.\n * @see av_malloc()\n */\nvoid *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);\n\n/**\n * Allocate a block of nmemb * size bytes with alignment suitable for all\n * memory accesses (including vectors if available on the CPU) and\n * zero all the bytes of the block.\n * The allocation will fail if nmemb * size is greater than or equal\n * to INT_MAX.\n * @param nmemb\n * @param size\n * @return Pointer to the allocated block, NULL if it cannot be allocated.\n */\nvoid *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;\n\n/**\n * Allocate a block of size * nmemb bytes with av_mallocz().\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Pointer to the allocated block, NULL if the block cannot\n * be allocated.\n * @see av_mallocz()\n * @see av_malloc_array()\n */\nav_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size)\n{\n    if (!size || nmemb >= INT_MAX / size)\n        return NULL;\n    return av_mallocz(nmemb * size);\n}\n\n/**\n * Duplicate the string s.\n * @param s string to be duplicated\n * @return Pointer to a newly-allocated string containing a\n * copy of s or NULL if the string cannot be allocated.\n */\nchar *av_strdup(const char *s) av_malloc_attrib;\n\n/**\n * Duplicate the buffer p.\n * @param p buffer to be duplicated\n * @return Pointer to a newly allocated buffer containing a\n * copy of p or NULL if the buffer cannot be allocated.\n */\nvoid *av_memdup(const void *p, size_t size);\n\n/**\n * Free a memory block which has been allocated with av_malloc(z)() or\n * av_realloc() and set the pointer pointing to it to NULL.\n * @param ptr Pointer to the pointer to the memory block which should\n * be freed.\n * @see av_free()\n */\nvoid av_freep(void *ptr);\n\n/**\n * Add an element to a dynamic array.\n *\n * The array to grow is supposed to be an array of pointers to\n * structures, and the element to add must be a pointer to an already\n * allocated structure.\n 
*\n * The array is reallocated when its size reaches powers of 2.\n * Therefore, the amortized cost of adding an element is constant.\n *\n * In case of success, the pointer to the array is updated in order to\n * point to the newly grown array, and the number pointed to by nb_ptr\n * is incremented.\n * In case of failure, the array is freed, *tab_ptr is set to NULL and\n * *nb_ptr is set to 0.\n *\n * @param tab_ptr pointer to the array to grow\n * @param nb_ptr  pointer to the number of elements in the array\n * @param elem    element to add\n * @see av_dynarray2_add()\n */\nvoid av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);\n\n/**\n * Add an element of size elem_size to a dynamic array.\n *\n * The array is reallocated when its number of elements reaches powers of 2.\n * Therefore, the amortized cost of adding an element is constant.\n *\n * In case of success, the pointer to the array is updated in order to\n * point to the newly grown array, and the number pointed to by nb_ptr\n * is incremented.\n * In case of failure, the array is freed, *tab_ptr is set to NULL and\n * *nb_ptr is set to 0.\n *\n * @param tab_ptr   pointer to the array to grow\n * @param nb_ptr    pointer to the number of elements in the array\n * @param elem_size size in bytes of the elements in the array\n * @param elem_data pointer to the data of the element to add. If NULL, the space of\n *                  the newly added element is not filled.\n * @return          pointer to the data of the element to copy in the newly allocated space.\n *                  If NULL, the newly allocated space is left uninitialized.\n * @see av_dynarray_add()\n */\nvoid *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,\n                       const uint8_t *elem_data);\n\n/**\n * Multiply two size_t values checking for overflow.\n * @return  0 on success, AVERROR(EINVAL) on overflow.\n */\nstatic inline int av_size_mult(size_t a, size_t b, size_t *r)\n{\n    size_t t = a * b;\n    /* Hack inspired by glibc: only try the division if nelem and elsize\n     * are both greater than sqrt(SIZE_MAX). 
*/\n    if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)\n        return AVERROR(EINVAL);\n    *r = t;\n    return 0;\n}\n\n/**\n * Set the maximum size that may be allocated in one block.\n */\nvoid av_max_alloc(size_t max);\n\n/**\n * deliberately overlapping memcpy implementation\n * @param dst destination buffer\n * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0\n * @param cnt number of bytes to copy, must be >= 0\n *\n * cnt > back is valid; this will copy the bytes we just copied,\n * thus creating a repeating pattern with a period length of back.\n */\nvoid av_memcpy_backptr(uint8_t *dst, int back, int cnt);\n\n/**\n * Reallocate the given block if it is not large enough, otherwise do nothing.\n *\n * @see av_realloc\n */\nvoid *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * Allocate a buffer, reusing the given one if large enough.\n *\n * Contrary to av_fast_realloc(), the current buffer contents might not be\n * preserved and on error the old buffer is freed, thus no special\n * handling to avoid memory leaks is necessary.\n *\n * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer\n * @param size size of the buffer *ptr points to\n * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and\n *                 *size 0 if an error occurred.\n */\nvoid av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_MEM_H */\n"
  },
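  {
    "path": "examples/mem_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path is invented. It shows\n * the overflow-checked size multiply, the zeroing allocator, and the\n * av_freep() idiom that nulls the pointer after freeing it.\n */\n#include <stdio.h>\n#include <libavutil/mem.h>\n\nint main(void)\n{\n    size_t total;\n    int *block;\n    char *copy;\n\n    /* checked multiply: returns AVERROR(EINVAL) instead of wrapping */\n    if (av_size_mult(1024, sizeof(int), &total) < 0)\n        return 1;\n\n    block = av_mallocz(total);        /* zero-filled, suitably aligned */\n    copy  = av_strdup(\"AR.Drone\");    /* heap copy of a string */\n    if (!block || !copy) {\n        av_free(block);\n        av_free(copy);\n        return 1;\n    }\n\n    printf(\"block[0] = %d, copy = %s\\n\", block[0], copy);\n\n    av_freep(&block);                 /* frees and sets block to NULL */\n    av_free(copy);\n    printf(\"after av_freep: block = %p\\n\", (void *)block);\n    return 0;\n}\n"
  },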
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/murmur3.h",
    "content": "/*\n * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_MURMUR3_H\n#define AVUTIL_MURMUR3_H\n\n#include <stdint.h>\n\nstruct AVMurMur3 *av_murmur3_alloc(void);\nvoid av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed);\nvoid av_murmur3_init(struct AVMurMur3 *c);\nvoid av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len);\nvoid av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]);\n\n#endif /* AVUTIL_MURMUR3_H */\n"
  },
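  {
    "path": "examples/murmur3_sketch.c",
    "content": "/*\n * Hypothetical usage sketch added for illustration; this file is not part of\n * FFmpeg or of the original repository, and the path is invented. The header\n * above declares the MurmurHash3 API without doc comments, so this shows the\n * expected call order: alloc, init (or seeded init), any number of updates,\n * then final for the 128-bit digest. av_free() comes from libavutil/mem.h.\n */\n#include <stdio.h>\n#include <string.h>\n#include <stdint.h>\n#include <libavutil/murmur3.h>\n#include <libavutil/mem.h>\n\nint main(void)\n{\n    struct AVMurMur3 *h = av_murmur3_alloc();\n    uint8_t digest[16];\n    const char *msg = \"navdata packet\";\n    int i;\n\n    if (!h)\n        return 1;\n\n    av_murmur3_init(h);                                  /* default seed */\n    av_murmur3_update(h, (const uint8_t *)msg, (int)strlen(msg));\n    av_murmur3_final(h, digest);\n    av_free(h);\n\n    for (i = 0; i < 16; i++)\n        printf(\"%02x\", digest[i]);\n    printf(\"\\n\");\n    return 0;\n}\n"
  },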
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/old_pix_fmts.h",
    "content": "/*\n * copyright (c) 2006-2012 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_OLD_PIX_FMTS_H\n#define AVUTIL_OLD_PIX_FMTS_H\n\n/*\n * This header exists to prevent new pixel formats from being accidentally added\n * to the deprecated list.\n * Do not include it directly. It will be removed on next major bump\n *\n * Do not add new items to this list. Use the AVPixelFormat enum instead.\n */\n    PIX_FMT_NONE = AV_PIX_FMT_NONE,\n    PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)\n    PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr\n    PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...\n    PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...\n    PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)\n    PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)\n    PIX_FMT_YUV410P,   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)\n    PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)\n    PIX_FMT_GRAY8,     ///<        Y        ,  8bpp\n    PIX_FMT_MONOWHITE, ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb\n    PIX_FMT_MONOBLACK, ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb\n    PIX_FMT_PAL8,      ///< 8 bit with PIX_FMT_RGB32 palette\n    PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range\n    PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range\n    PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range\n#if FF_API_XVMC\n    PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing\n    PIX_FMT_XVMC_MPEG2_IDCT,\n#endif /* FF_API_XVMC */\n    PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1\n    PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3\n    PIX_FMT_BGR8,      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)\n    PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits\n    PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)\n    PIX_FMT_RGB8,      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)\n    PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits\n    
PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)\n    PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)\n    PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped\n\n    PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...\n    PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...\n    PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...\n    PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...\n\n    PIX_FMT_GRAY16BE,  ///<        Y        , 16bpp, big-endian\n    PIX_FMT_GRAY16LE,  ///<        Y        , 16bpp, little-endian\n    PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)\n    PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range\n    PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)\n#if FF_API_VDPAU\n    PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian\n    PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian\n\n    PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), big-endian\n    PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), little-endian\n    PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0\n    PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0\n\n    PIX_FMT_BGR565BE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), big-endian\n    PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), little-endian\n    PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1\n    PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1\n\n    PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers\n    PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers\n    
PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n\n    PIX_FMT_YUV420P16LE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV420P16BE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV422P16LE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n#if FF_API_VDPAU\n    PIX_FMT_VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    PIX_FMT_DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer\n\n    PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0\n    PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0\n    PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1\n    PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1\n    PIX_FMT_GRAY8A,    ///< 8bit gray, 8bit alpha\n    PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian\n    PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian\n\n    //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus\n    //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately\n    //is better\n    PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_VDA_VLD,    ///< hardware decoding through VDA\n\n#ifdef 
AV_PIX_FMT_ABI_GIT_MASTER\n    PIX_FMT_RGBA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp\n    PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big endian\n    PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little endian\n    PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big endian\n    PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little endian\n    PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big endian\n    PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little endian\n\n#ifndef AV_PIX_FMT_ABI_GIT_MASTER\n    PIX_FMT_RGBA64BE=0x123,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    PIX_FMT_0RGB=0x123+4,      ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...\n    PIX_FMT_RGB0,      ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...\n    PIX_FMT_0BGR,      ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...\n    PIX_FMT_BGR0,      ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...\n    PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)\n    PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)\n\n    PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big endian\n    PIX_FMT_GBRP12LE,    
///< planar GBR 4:4:4 36bpp, little endian\n    PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big endian\n    PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little endian\n\n    PIX_FMT_NB,        ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions\n#endif /* AVUTIL_OLD_PIX_FMTS_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/opt.h",
    "content": "/*\n * AVOptions\n * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_OPT_H\n#define AVUTIL_OPT_H\n\n/**\n * @file\n * AVOptions\n */\n\n#include \"rational.h\"\n#include \"avutil.h\"\n#include \"dict.h\"\n#include \"log.h\"\n#include \"pixfmt.h\"\n#include \"samplefmt.h\"\n\n/**\n * @defgroup avoptions AVOptions\n * @ingroup lavu_data\n * @{\n * AVOptions provide a generic system to declare options on arbitrary structs\n * (\"objects\"). An option can have a help text, a type and a range of possible\n * values. Options may then be enumerated, read and written to.\n *\n * @section avoptions_implement Implementing AVOptions\n * This section describes how to add AVOptions capabilities to a struct.\n *\n * All AVOptions-related information is stored in an AVClass. Therefore\n * the first member of the struct should be a pointer to an AVClass describing it.\n * The option field of the AVClass must be set to a NULL-terminated static array\n * of AVOptions. Each AVOption must have a non-empty name, a type, a default\n * value and for number-type AVOptions also a range of allowed values. It must\n * also declare an offset in bytes from the start of the struct, where the field\n * associated with this AVOption is located. Other fields in the AVOption struct\n * should also be set when applicable, but are not required.\n *\n * The following example illustrates an AVOptions-enabled struct:\n * @code\n * typedef struct test_struct {\n *     AVClass *class;\n *     int      int_opt;\n *     char    *str_opt;\n *     uint8_t *bin_opt;\n *     int      bin_len;\n * } test_struct;\n *\n * static const AVOption test_options[] = {\n *   { \"test_int\", \"This is a test option of int type.\", offsetof(test_struct, int_opt),\n *     AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },\n *   { \"test_str\", \"This is a test option of string type.\", offsetof(test_struct, str_opt),\n *     AV_OPT_TYPE_STRING },\n *   { \"test_bin\", \"This is a test option of binary type.\", offsetof(test_struct, bin_opt),\n *     AV_OPT_TYPE_BINARY },\n *   { NULL },\n * };\n *\n * static const AVClass test_class = {\n *     .class_name = \"test class\",\n *     .item_name  = av_default_item_name,\n *     .option     = test_options,\n *     .version    = LIBAVUTIL_VERSION_INT,\n * };\n * @endcode\n *\n * Next, when allocating your struct, you must ensure that the AVClass pointer\n * is set to the correct value. Then, av_opt_set_defaults() can be called to\n * initialize defaults. 
After that the struct is ready to be used with the\n * AVOptions API.\n *\n * When cleaning up, you may use the av_opt_free() function to automatically\n * free all the allocated string and binary options.\n *\n * Continuing with the above example:\n *\n * @code\n * test_struct *alloc_test_struct(void)\n * {\n *     test_struct *ret = av_malloc(sizeof(*ret));\n *     ret->class = &test_class;\n *     av_opt_set_defaults(ret);\n *     return ret;\n * }\n * void free_test_struct(test_struct **foo)\n * {\n *     av_opt_free(*foo);\n *     av_freep(foo);\n * }\n * @endcode\n *\n * @subsection avoptions_implement_nesting Nesting\n *      It may happen that an AVOptions-enabled struct contains another\n *      AVOptions-enabled struct as a member (e.g. AVCodecContext in\n *      libavcodec exports generic options, while its priv_data field exports\n *      codec-specific options). In such a case, it is possible to set up the\n *      parent struct to export a child's options. To do that, simply\n *      implement AVClass.child_next() and AVClass.child_class_next() in the\n *      parent struct's AVClass.\n *      Assuming that the test_struct from above now also contains a\n *      child_struct field:\n *\n *      @code\n *      typedef struct child_struct {\n *          AVClass *class;\n *          int flags_opt;\n *      } child_struct;\n *      static const AVOption child_opts[] = {\n *          { \"test_flags\", \"This is a test option of flags type.\",\n *            offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX },\n *          { NULL },\n *      };\n *      static const AVClass child_class = {\n *          .class_name = \"child class\",\n *          .item_name  = av_default_item_name,\n *          .option     = child_opts,\n *          .version    = LIBAVUTIL_VERSION_INT,\n *      };\n *\n *      void *child_next(void *obj, void *prev)\n *      {\n *          test_struct *t = obj;\n *          if (!prev && t->child_struct)\n *              return t->child_struct;\n *          return NULL;\n *      }\n *      const AVClass *child_class_next(const AVClass *prev)\n *      {\n *          return prev ? NULL : &child_class;\n *      }\n *      @endcode\n *      Putting child_next() and child_class_next() as defined above into\n *      test_class will now make child_struct's options accessible through\n *      test_struct (again, proper setup as described above needs to be done on\n *      child_struct right after it is created).\n *\n *      From the above example it might not be clear why both child_next()\n *      and child_class_next() are needed. The distinction is that child_next()\n *      iterates over actually existing objects, while child_class_next()\n *      iterates over all possible child classes. E.g. if an AVCodecContext\n *      was initialized to use a codec which has private options, then its\n *      child_next() will return AVCodecContext.priv_data and finish\n *      iterating. OTOH child_class_next() on AVCodecContext.av_class will\n *      iterate over all available codecs with private options.\n *\n * @subsection avoptions_implement_named_constants Named constants\n *      It is possible to create named constants for options. 
Simply set the unit\n *      field of the option the constants should apply to to a string, and\n *      create the constants themselves as options of type AV_OPT_TYPE_CONST\n *      with their unit field set to the same string.\n *      Their default_val field should contain the value of the named\n *      constant.\n *      For example, to add some named constants for the test_flags option\n *      above, put the following into the child_opts array:\n *      @code\n *      { \"test_flags\", \"This is a test option of flags type.\",\n *        offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, \"test_unit\" },\n *      { \"flag1\", \"This is a flag with value 16\", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, \"test_unit\" },\n *      @endcode\n *\n * @section avoptions_use Using AVOptions\n * This section deals with accessing options in an AVOptions-enabled struct.\n * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or\n * AVFormatContext in libavformat.\n *\n * @subsection avoptions_use_examine Examining AVOptions\n * The basic functions for examining options are av_opt_next(), which iterates\n * over all options defined for one object, and av_opt_find(), which searches\n * for an option with the given name.\n *\n * The situation is more complicated with nesting. An AVOptions-enabled struct\n * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag\n * to av_opt_find() will make the function search children recursively.\n *\n * For enumerating there are basically two cases. The first is when you want to\n * get all options that may potentially exist on the struct and its children\n * (e.g.  when constructing documentation). In that case you should call\n * av_opt_child_class_next() recursively on the parent struct's AVClass.  The\n * second case is when you have an already initialized struct with all its\n * children and you want to get all options that can be actually written or read\n * from it. In that case you should call av_opt_child_next() recursively (and\n * av_opt_next() on each result).\n *\n * @subsection avoptions_use_get_set Reading and writing AVOptions\n * When setting options, you often have a string read directly from the\n * user. In such a case, simply passing it to av_opt_set() is enough. For\n * non-string type options, av_opt_set() will parse the string according to the\n * option type.\n *\n * Similarly av_opt_get() will read any option type and convert it to a string\n * which will be returned. Do not forget that the string is allocated, so you\n * have to free it with av_free().\n *\n * In some cases it may be more convenient to put all options into an\n * AVDictionary and call av_opt_set_dict() on it. A specific case of this\n * is the format/codec open functions in lavf/lavc which take a dictionary\n * filled with options as a parameter. This allows setting some options\n * that cannot be set otherwise, since e.g. 
the input file format is not known\n * before the file is actually opened.\n */\n\nenum AVOptionType{\n    AV_OPT_TYPE_FLAGS,\n    AV_OPT_TYPE_INT,\n    AV_OPT_TYPE_INT64,\n    AV_OPT_TYPE_DOUBLE,\n    AV_OPT_TYPE_FLOAT,\n    AV_OPT_TYPE_STRING,\n    AV_OPT_TYPE_RATIONAL,\n    AV_OPT_TYPE_BINARY,  ///< offset must point to a pointer immediately followed by an int for the length\n    AV_OPT_TYPE_CONST = 128,\n    AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers\n    AV_OPT_TYPE_PIXEL_FMT  = MKBETAG('P','F','M','T'),\n    AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'),\n    AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational\n    AV_OPT_TYPE_DURATION   = MKBETAG('D','U','R',' '),\n    AV_OPT_TYPE_COLOR      = MKBETAG('C','O','L','R'),\n    AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'),\n#if FF_API_OLD_AVOPTIONS\n    FF_OPT_TYPE_FLAGS = 0,\n    FF_OPT_TYPE_INT,\n    FF_OPT_TYPE_INT64,\n    FF_OPT_TYPE_DOUBLE,\n    FF_OPT_TYPE_FLOAT,\n    FF_OPT_TYPE_STRING,\n    FF_OPT_TYPE_RATIONAL,\n    FF_OPT_TYPE_BINARY,  ///< offset must point to a pointer immediately followed by an int for the length\n    FF_OPT_TYPE_CONST=128,\n#endif\n};\n\n/**\n * AVOption\n */\ntypedef struct AVOption {\n    const char *name;\n\n    /**\n     * short English help text\n     * @todo What about other languages?\n     */\n    const char *help;\n\n    /**\n     * The offset relative to the context structure where the option\n     * value is stored. It should be 0 for named constants.\n     */\n    int offset;\n    enum AVOptionType type;\n\n    /**\n     * the default value for scalar options\n     */\n    union {\n        int64_t i64;\n        double dbl;\n        const char *str;\n        /* TODO those are unused now */\n        AVRational q;\n    } default_val;\n    double min;                 ///< minimum valid value for the option\n    double max;                 ///< maximum valid value for the option\n\n    int flags;\n#define AV_OPT_FLAG_ENCODING_PARAM  1   ///< a generic parameter which can be set by the user for muxing or encoding\n#define AV_OPT_FLAG_DECODING_PARAM  2   ///< a generic parameter which can be set by the user for demuxing or decoding\n#if FF_API_OPT_TYPE_METADATA\n#define AV_OPT_FLAG_METADATA        4   ///< some data extracted or inserted into the file like title, comment, ...\n#endif\n#define AV_OPT_FLAG_AUDIO_PARAM     8\n#define AV_OPT_FLAG_VIDEO_PARAM     16\n#define AV_OPT_FLAG_SUBTITLE_PARAM  32\n/**\n * The option is intended for exporting values to the caller.\n */\n#define AV_OPT_FLAG_EXPORT          64\n/**\n * The option may not be set through the AVOptions API, only read.\n * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set.\n */\n#define AV_OPT_FLAG_READONLY        128\n#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering\n//FIXME think about enc-audio, ... style flags\n\n    /**\n     * The logical unit to which the option belongs. Non-constant\n     * options and corresponding named constants share the same\n     * unit. 
May be NULL.\n     */\n    const char *unit;\n} AVOption;\n\n/**\n * A single allowed range of values, or a single allowed value.\n */\ntypedef struct AVOptionRange {\n    const char *str;\n    double value_min, value_max;             ///< For string ranges this represents the min/max length, for dimensions this represents the min/max pixel count\n    double component_min, component_max;     ///< For string this represents the unicode range for chars, 0-127 limits to ASCII\n    int is_range;                            ///< if set to 1 the struct encodes a range, if set to 0 a single value\n} AVOptionRange;\n\n/**\n * List of AVOptionRange structs\n */\ntypedef struct AVOptionRanges {\n    AVOptionRange **range;\n    int nb_ranges;\n} AVOptionRanges;\n\n\n#if FF_API_FIND_OPT\n/**\n * Look for an option in obj. Look only for the options which\n * have the flags set as specified in mask and flags (that is,\n * for which it is the case that (opt->flags & mask) == flags).\n *\n * @param[in] obj a pointer to a struct whose first element is a\n * pointer to an AVClass\n * @param[in] name the name of the option to look for\n * @param[in] unit the unit of the option to look for, or any if NULL\n * @return a pointer to the option found, or NULL if no option\n * has been found\n *\n * @deprecated use av_opt_find.\n */\nattribute_deprecated\nconst AVOption *av_find_opt(void *obj, const char *name, const char *unit, int mask, int flags);\n#endif\n\n#if FF_API_OLD_AVOPTIONS\n/**\n * Set the field of obj with the given name to value.\n *\n * @param[in] obj A struct whose first element is a pointer to an\n * AVClass.\n * @param[in] name the name of the field to set\n * @param[in] val The value to set. If the field is not of a string\n * type, then the given string is parsed.\n * SI postfixes and some named scalars are supported.\n * If the field is of a numeric type, it has to be a numeric or named\n * scalar. Behavior with more than one scalar and +- infix operators\n * is undefined.\n * If the field is of a flags type, it has to be a sequence of numeric\n * scalars or named flags separated by '+' or '-'. 
Prefixing a flag\n * with '+' causes it to be set without affecting the other flags;\n * similarly, '-' unsets a flag.\n * @param[out] o_out if non-NULL put here a pointer to the AVOption\n * found\n * @param alloc this parameter is currently ignored\n * @return 0 if the value has been set, or an AVERROR code in case of\n * error:\n * AVERROR_OPTION_NOT_FOUND if no matching option exists\n * AVERROR(ERANGE) if the value is out of range\n * AVERROR(EINVAL) if the value is not valid\n * @deprecated use av_opt_set()\n */\nattribute_deprecated\nint av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out);\n\nattribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n);\nattribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n);\nattribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n);\n\ndouble av_get_double(void *obj, const char *name, const AVOption **o_out);\nAVRational av_get_q(void *obj, const char *name, const AVOption **o_out);\nint64_t av_get_int(void *obj, const char *name, const AVOption **o_out);\nattribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len);\nattribute_deprecated const AVOption *av_next_option(void *obj, const AVOption *last);\n#endif\n\n/**\n * Show the obj options.\n *\n * @param req_flags requested flags for the options to show. Show only the\n * options for which it is opt->flags & req_flags.\n * @param rej_flags rejected flags for the options to show. Show only the\n * options for which it is !(opt->flags & rej_flags).\n * @param av_log_obj log context to use for showing the options\n */\nint av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);\n\n/**\n * Set the values of all AVOption fields to their default values.\n *\n * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)\n */\nvoid av_opt_set_defaults(void *s);\n\n#if FF_API_OLD_AVOPTIONS\nattribute_deprecated\nvoid av_opt_set_defaults2(void *s, int mask, int flags);\n#endif\n\n/**\n * Parse the key/value pairs list in opts. For each key/value pair\n * found, stores the value in the field in ctx that is named like the\n * key. ctx must be an AVClass context, storing is done using\n * AVOptions.\n *\n * @param opts options string to parse, may be NULL\n * @param key_val_sep a 0-terminated list of characters used to\n * separate key from value\n * @param pairs_sep a 0-terminated list of characters used to separate\n * two pairs from each other\n * @return the number of successfully set key/value pairs, or a negative\n * value corresponding to an AVERROR code in case of error:\n * AVERROR(EINVAL) if opts cannot be parsed,\n * the error code issued by av_set_string3() if a key/value pair\n * cannot be set\n */\nint av_set_options_string(void *ctx, const char *opts,\n                          const char *key_val_sep, const char *pairs_sep);\n\n/**\n * Parse the key-value pairs list in opts. 
For each key=value pair found,\n * set the value of the corresponding option in ctx.\n *\n * @param ctx          the AVClass object to set options on\n * @param opts         the options string, key-value pairs separated by a\n *                     delimiter\n * @param shorthand    a NULL-terminated array of option names for shorthand\n *                     notation: if the first field in opts has no key part,\n *                     the key is taken from the first element of shorthand;\n *                     then again for the second, etc., until either opts is\n *                     finished, shorthand is finished or a named option is\n *                     found; after that, all options must be named\n * @param key_val_sep  a 0-terminated list of characters used to separate\n *                     key from value, for example '='\n * @param pairs_sep    a 0-terminated list of characters used to separate\n *                     two pairs from each other, for example ':' or ','\n * @return  the number of successfully set key=value pairs, or a negative\n *          value corresponding to an AVERROR code in case of error:\n *          AVERROR(EINVAL) if opts cannot be parsed,\n *          the error code issued by av_set_string3() if a key/value pair\n *          cannot be set\n *\n * Option names must use only the following characters: a-z A-Z 0-9 - . / _\n * Separators must use characters distinct from option names and from each\n * other.\n */\nint av_opt_set_from_string(void *ctx, const char *opts,\n                           const char *const *shorthand,\n                           const char *key_val_sep, const char *pairs_sep);\n/**\n * Free all string and binary options in obj.\n */\nvoid av_opt_free(void *obj);\n\n/**\n * Check whether a particular flag is set in a flags field.\n *\n * @param field_name the name of the flag field option\n * @param flag_name the name of the flag to check\n * @return non-zero if the flag is set, zero if the flag isn't set,\n *         isn't of the right type, or the flags field doesn't exist.\n */\nint av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);\n\n/**\n * Set all the options from a given dictionary on an object.\n *\n * @param obj a struct whose first element is a pointer to AVClass\n * @param options options to process. 
This dictionary will be freed and replaced\n *                by a new one containing all options not found in obj.\n *                Of course this new dictionary needs to be freed by the caller\n *                with av_dict_free().\n *\n * @return 0 on success, a negative AVERROR if some option was found in obj,\n *         but could not be set.\n *\n * @see av_dict_copy()\n */\nint av_opt_set_dict(void *obj, struct AVDictionary **options);\n\n/**\n * Extract a key-value pair from the beginning of a string.\n *\n * @param ropts        pointer to the options string, will be updated to\n *                     point to the rest of the string (one of the pairs_sep\n *                     or the final NUL)\n * @param key_val_sep  a 0-terminated list of characters used to separate\n *                     key from value, for example '='\n * @param pairs_sep    a 0-terminated list of characters used to separate\n *                     two pairs from each other, for example ':' or ','\n * @param flags        flags; see the AV_OPT_FLAG_* values below\n * @param rkey         parsed key; must be freed using av_free()\n * @param rval         parsed value; must be freed using av_free()\n *\n * @return  >=0 for success, or a negative value corresponding to an\n *          AVERROR code in case of error; in particular:\n *          AVERROR(EINVAL) if no key is present\n *\n */\nint av_opt_get_key_value(const char **ropts,\n                         const char *key_val_sep, const char *pairs_sep,\n                         unsigned flags,\n                         char **rkey, char **rval);\n\nenum {\n\n    /**\n     * Accept parsing a value without a key; the key will then be returned\n     * as NULL.\n     */\n    AV_OPT_FLAG_IMPLICIT_KEY = 1,\n};\n\n/**\n * @defgroup opt_eval_funcs Evaluating option strings\n * @{\n * This group of functions can be used to evaluate option strings\n * and get numbers out of them. They do the same thing as av_opt_set(),\n * except the result is written into the caller-supplied pointer.\n *\n * @param obj a struct whose first element is a pointer to AVClass.\n * @param o an option for which the string is to be evaluated.\n * @param val string to be evaluated.\n * @param *_out value of the string will be written here.\n *\n * @return 0 on success, a negative number on failure.\n */\nint av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int        *flags_out);\nint av_opt_eval_int   (void *obj, const AVOption *o, const char *val, int        *int_out);\nint av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t    *int64_out);\nint av_opt_eval_float (void *obj, const AVOption *o, const char *val, float      *float_out);\nint av_opt_eval_double(void *obj, const AVOption *o, const char *val, double     *double_out);\nint av_opt_eval_q     (void *obj, const AVOption *o, const char *val, AVRational *q_out);\n/**\n * @}\n */\n\n#define AV_OPT_SEARCH_CHILDREN   0x0001 /**< Search in possible children of the\n                                             given object first. */\n/**\n *  The obj passed to av_opt_find() is fake -- only a double pointer to AVClass\n *  instead of a required pointer to a struct containing AVClass. This is\n *  useful for searching for options without needing to allocate the corresponding\n *  object.\n */\n#define AV_OPT_SEARCH_FAKE_OBJ   0x0002\n\n/**\n * Look for an option in an object. 
Consider only options which\n * have all the specified flags set.\n *\n * @param[in] obj A pointer to a struct whose first element is a\n *                pointer to an AVClass.\n *                Alternatively a double pointer to an AVClass, if\n *                AV_OPT_SEARCH_FAKE_OBJ search flag is set.\n * @param[in] name The name of the option to look for.\n * @param[in] unit When searching for named constants, name of the unit\n *                 it belongs to.\n * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).\n * @param search_flags A combination of AV_OPT_SEARCH_*.\n *\n * @return A pointer to the option found, or NULL if no option\n *         was found.\n *\n * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable\n * directly with av_set_string3(). Use special calls which take an options\n * AVDictionary (e.g. avformat_open_input()) to set options found with this\n * flag.\n */\nconst AVOption *av_opt_find(void *obj, const char *name, const char *unit,\n                            int opt_flags, int search_flags);\n\n/**\n * Look for an option in an object. Consider only options which\n * have all the specified flags set.\n *\n * @param[in] obj A pointer to a struct whose first element is a\n *                pointer to an AVClass.\n *                Alternatively a double pointer to an AVClass, if\n *                AV_OPT_SEARCH_FAKE_OBJ search flag is set.\n * @param[in] name The name of the option to look for.\n * @param[in] unit When searching for named constants, name of the unit\n *                 it belongs to.\n * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).\n * @param search_flags A combination of AV_OPT_SEARCH_*.\n * @param[out] target_obj if non-NULL, an object to which the option belongs will be\n * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present\n * in search_flags. This parameter is ignored if search_flags contain\n * AV_OPT_SEARCH_FAKE_OBJ.\n *\n * @return A pointer to the option found, or NULL if no option\n *         was found.\n */\nconst AVOption *av_opt_find2(void *obj, const char *name, const char *unit,\n                             int opt_flags, int search_flags, void **target_obj);\n\n/**\n * Iterate over all AVOptions belonging to obj.\n *\n * @param obj an AVOptions-enabled struct or a double pointer to an\n *            AVClass describing it.\n * @param prev result of the previous call to av_opt_next() on this object\n *             or NULL\n * @return next AVOption or NULL\n */\nconst AVOption *av_opt_next(void *obj, const AVOption *prev);\n\n/**\n * Iterate over AVOptions-enabled children of obj.\n *\n * @param prev result of a previous call to this function or NULL\n * @return next AVOptions-enabled child or NULL\n */\nvoid *av_opt_child_next(void *obj, void *prev);\n\n/**\n * Iterate over potential AVOptions-enabled children of parent.\n *\n * @param prev result of a previous call to this function or NULL\n * @return AVClass corresponding to next potential child or NULL\n */\nconst AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);\n\n/**\n * @defgroup opt_set_funcs Option setting functions\n * @{\n * Those functions set the field of obj with the given name to value.\n *\n * @param[in] obj A struct whose first element is a pointer to an AVClass.\n * @param[in] name the name of the field to set\n * @param[in] val The value to set. 
In case of av_opt_set() if the field is not\n * of a string type, then the given string is parsed.\n * SI postfixes and some named scalars are supported.\n * If the field is of a numeric type, it has to be a numeric or named\n * scalar. Behavior with more than one scalar and +- infix operators\n * is undefined.\n * If the field is of a flags type, it has to be a sequence of numeric\n * scalars or named flags separated by '+' or '-'. Prefixing a flag\n * with '+' causes it to be set without affecting the other flags;\n * similarly, '-' unsets a flag.\n * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN\n * is passed here, then the option may be set on a child of obj.\n *\n * @return 0 if the value has been set, or an AVERROR code in case of\n * error:\n * AVERROR_OPTION_NOT_FOUND if no matching option exists\n * AVERROR(ERANGE) if the value is out of range\n * AVERROR(EINVAL) if the value is not valid\n */\nint av_opt_set       (void *obj, const char *name, const char *val, int search_flags);\nint av_opt_set_int   (void *obj, const char *name, int64_t     val, int search_flags);\nint av_opt_set_double(void *obj, const char *name, double      val, int search_flags);\nint av_opt_set_q     (void *obj, const char *name, AVRational  val, int search_flags);\nint av_opt_set_bin   (void *obj, const char *name, const uint8_t *val, int size, int search_flags);\nint av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags);\nint av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);\nint av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);\nint av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);\nint av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);\n\n/**\n * Set a binary option to an integer list.\n *\n * @param obj    AVClass object to set options on\n * @param name   name of the binary option\n * @param val    pointer to an integer list (must have the correct type with\n *               regard to the contents of the list)\n * @param term   list terminator (usually 0 or -1)\n * @param flags  search flags\n */\n#define av_opt_set_int_list(obj, name, val, term, flags) \\\n    (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \\\n     AVERROR(EINVAL) : \\\n     av_opt_set_bin(obj, name, (const uint8_t *)(val), \\\n                    av_int_list_length(val, term) * sizeof(*(val)), flags))\n/**\n * @}\n */\n\n/**\n * @defgroup opt_get_funcs Option getting functions\n * @{\n * Those functions get a value of the option with the given name from an object.\n *\n * @param[in] obj a struct whose first element is a pointer to an AVClass.\n * @param[in] name name of the option to get.\n * @param[in] search_flags flags passed to av_opt_find2. I.e. 
if AV_OPT_SEARCH_CHILDREN\n * is passed here, then the option may be found in a child of obj.\n * @param[out] out_val value of the option will be written here\n * @return >=0 on success, a negative error code otherwise\n */\n/**\n * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller\n */\nint av_opt_get       (void *obj, const char *name, int search_flags, uint8_t   **out_val);\nint av_opt_get_int   (void *obj, const char *name, int search_flags, int64_t    *out_val);\nint av_opt_get_double(void *obj, const char *name, int search_flags, double     *out_val);\nint av_opt_get_q     (void *obj, const char *name, int search_flags, AVRational *out_val);\nint av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out);\nint av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt);\nint av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt);\nint av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val);\nint av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout);\n/**\n * @}\n */\n/**\n * Get a pointer to the requested field in a struct.\n * This function allows accessing a struct even when its fields are moved or\n * renamed since the application making the access has been compiled.\n *\n * @returns a pointer to the field, it can be cast to the correct type and read\n *          or written to.\n */\nvoid *av_opt_ptr(const AVClass *avclass, void *obj, const char *name);\n\n/**\n * Free an AVOptionRanges struct and set it to NULL.\n */\nvoid av_opt_freep_ranges(AVOptionRanges **ranges);\n\n/**\n * Get a list of allowed ranges for the given option.\n *\n * The returned list may depend on other fields in obj like for example profile.\n *\n * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored\n *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance\n *\n * The result must be freed with av_opt_freep_ranges.\n *\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags);\n\n/**\n * Get a default list of allowed ranges for the given option.\n *\n * This list is constructed without using the AVClass.query_ranges() callback\n * and can be used as fallback from within the callback.\n *\n * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored\n *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance\n *\n * The result must be freed with av_opt_freep_ranges.\n *\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_OPT_H */\n"
  },
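/* A minimal usage sketch for the AVOptions getters/setters declared in the
 * opt.h above; illustrative only, not part of the FFmpeg sources. It inlines
 * a stripped-down version of the test_struct/test_options/test_class example
 * from the top of that header so it is self-contained. */
#include <limits.h>
#include <stddef.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

typedef struct test_struct {
    const AVClass *class;   /* first member must point to the AVClass */
    int int_opt;
} test_struct;

static const AVOption test_options[] = {
    { "test_int", "A test option of int type.", offsetof(test_struct, int_opt),
      AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },
    { NULL },
};

static const AVClass test_class = {
    .class_name = "test class",
    .item_name  = av_default_item_name,
    .option     = test_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static int set_and_read_back(void)
{
    test_struct obj = { .class = &test_class };
    uint8_t *str = NULL;
    int64_t n;
    int ret;

    av_opt_set_defaults(&obj);            /* int_opt is now -1 */

    /* String form: the value is parsed according to the option type. */
    if ((ret = av_opt_set(&obj, "test_int", "42", 0)) < 0)
        return ret;                       /* e.g. AVERROR_OPTION_NOT_FOUND */

    /* av_opt_get() allocates the returned string; free it with av_free(). */
    if ((ret = av_opt_get(&obj, "test_int", 0, &str)) >= 0)
        av_free(str);

    /* Typed getter: no string conversion involved. */
    return av_opt_get_int(&obj, "test_int", 0, &n);
}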
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/parseutils.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_PARSEUTILS_H\n#define AVUTIL_PARSEUTILS_H\n\n#include <time.h>\n\n#include \"rational.h\"\n\n/**\n * @file\n * misc parsing utilities\n */\n\n/**\n * Parse str and store the parsed ratio in q.\n *\n * Note that a ratio with infinite (1/0) or negative value is\n * considered valid, so you should check on the returned value if you\n * want to exclude those values.\n *\n * The undefined value can be expressed using the \"0:0\" string.\n *\n * @param[in,out] q pointer to the AVRational which will contain the ratio\n * @param[in] str the string to parse: it has to be a string in the format\n * num:den, a float number or an expression\n * @param[in] max the maximum allowed numerator and denominator\n * @param[in] log_offset log level offset which is applied to the log\n * level of log_ctx\n * @param[in] log_ctx parent logging context\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_parse_ratio(AVRational *q, const char *str, int max,\n                   int log_offset, void *log_ctx);\n\n#define av_parse_ratio_quiet(rate, str, max) \\\n    av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL)\n\n/**\n * Parse str and put in width_ptr and height_ptr the detected values.\n *\n * @param[in,out] width_ptr pointer to the variable which will contain the detected\n * width value\n * @param[in,out] height_ptr pointer to the variable which will contain the detected\n * height value\n * @param[in] str the string to parse: it has to be a string in the format\n * width x height or a valid video size abbreviation.\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);\n\n/**\n * Parse str and store the detected values in *rate.\n *\n * @param[in,out] rate pointer to the AVRational which will contain the detected\n * frame rate\n * @param[in] str the string to parse: it has to be a string in the format\n * rate_num / rate_den, a float number or a valid video rate abbreviation\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_parse_video_rate(AVRational *rate, const char *str);\n\n/**\n * Put the RGBA values that correspond to color_string in rgba_color.\n *\n * @param color_string a string specifying a color. 
It can be the name of\n * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,\n * possibly followed by \"@\" and a string representing the alpha\n * component.\n * The alpha component may be a string composed of \"0x\" followed by a\n * hexadecimal number or a decimal number between 0.0 and 1.0, which\n * represents the opacity value (0x00/0.0 means completely transparent,\n * 0xff/1.0 completely opaque).\n * If the alpha component is not specified then 0xff is assumed.\n * The string \"random\" will result in a random color.\n * @param slen length of the initial part of color_string containing the\n * color. It can be set to -1 if color_string is a null terminated string\n * containing nothing but the color.\n * @return >= 0 in case of success, a negative value in case of\n * failure (for example if color_string cannot be parsed).\n */\nint av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,\n                   void *log_ctx);\n\n/**\n * Get the name of a color from the internal table of hard-coded named\n * colors.\n *\n * This function is meant to enumerate the color names recognized by\n * av_parse_color().\n *\n * @param color_idx index of the requested color, starting from 0\n * @param rgb       if not NULL, will point to a 3-element array with the color value in RGB\n * @return the color name string or NULL if color_idx is not in the array\n */\nconst char *av_get_known_color_name(int color_idx, const uint8_t **rgb);\n\n/**\n * Parse timestr and return in *timeval a corresponding number of\n * microseconds.\n *\n * @param timeval puts here the number of microseconds corresponding\n * to the string in timestr. If the string represents a duration, it\n * is the number of microseconds contained in the time interval.  If\n * the string is a date, it is the number of microseconds since the 1st of\n * January, 1970 up to the time of the parsed date.  
If timestr cannot\n * be successfully parsed, set *timeval to INT64_MIN.\n *\n * @param timestr a string representing a date or a duration.\n * - If a date the syntax is:\n * @code\n * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z]\n * now\n * @endcode\n * If the value is \"now\" it takes the current time.\n * Time is local time unless Z is appended, in which case it is\n * interpreted as UTC.\n * If the year-month-day part is not specified it takes the current\n * year-month-day.\n * - If a duration the syntax is:\n * @code\n * [-][HH:]MM:SS[.m...]\n * [-]S+[.m...]\n * @endcode\n * @param duration flag which tells how to interpret timestr, if not\n * zero timestr is interpreted as a duration, otherwise as a date\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code otherwise\n */\nint av_parse_time(int64_t *timeval, const char *timestr, int duration);\n\n/**\n * Parse the input string p according to the format string fmt and\n * store its results in the structure dt.\n * This implementation supports only a subset of the formats supported\n * by the standard strptime().\n *\n * In particular it actually supports the parameters:\n * - %H: the hour as a decimal number, using a 24-hour clock, in the\n * range '00' through '23'\n * - %J: hours as a decimal number, in the range '0' through INT_MAX\n * - %M: the minute as a decimal number, in the\n * range '00' through '59'\n * - %S: the second as a decimal number, in the\n * range '00' through '59'\n * - %Y: the year as a decimal number, using the Gregorian calendar\n * - %m: the month as a decimal number, in the range '1' through '12'\n * - %d: the day of the month as a decimal number, in the range '1'\n * through '31'\n * - %%: a literal '%'\n *\n * @return a pointer to the first character not processed in this\n * function call, or NULL in case the function fails to match all of\n * the fmt string and therefore an error occurred\n */\nchar *av_small_strptime(const char *p, const char *fmt, struct tm *dt);\n\n/**\n * Attempt to find a specific tag in a URL.\n *\n * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.\n * Return 1 if found.\n */\nint av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);\n\n/**\n * Convert the decomposed UTC time in tm to a time_t value.\n */\ntime_t av_timegm(struct tm *tm);\n\n#endif /* AVUTIL_PARSEUTILS_H */\n"
  },
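/* A minimal sketch of the parsing helpers declared in the parseutils.h above;
 * illustrative only, not part of the FFmpeg sources. "vga" and "ntsc" are
 * standard size/rate abbreviations and "red" a built-in color name recognized
 * by these parsers. */
#include <stdio.h>
#include <libavutil/parseutils.h>

int main(void)
{
    int w, h;
    AVRational rate;
    uint8_t rgba[4];

    /* "vga" is a video size abbreviation for 640x480. */
    if (av_parse_video_size(&w, &h, "vga") >= 0)
        printf("vga = %dx%d\n", w, h);

    /* "ntsc" parses to the 30000/1001 frame rate. */
    if (av_parse_video_rate(&rate, "ntsc") >= 0)
        printf("ntsc = %d/%d fps\n", rate.num, rate.den);

    /* Named color with an "@alpha" suffix; slen = -1 because the string is
     * NUL-terminated and contains nothing but the color. */
    if (av_parse_color(rgba, "red@0.5", -1, NULL) >= 0)
        printf("red@0.5 = r%u g%u b%u a%u\n", rgba[0], rgba[1], rgba[2], rgba[3]);

    return 0;
}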
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/pixdesc.h",
    "content": "/*\n * pixel format descriptor\n * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_PIXDESC_H\n#define AVUTIL_PIXDESC_H\n\n#include <inttypes.h>\n\n#include \"attributes.h\"\n#include \"pixfmt.h\"\n\ntypedef struct AVComponentDescriptor{\n    uint16_t plane        :2;            ///< which of the 4 planes contains the component\n\n    /**\n     * Number of elements between 2 horizontally consecutive pixels minus 1.\n     * Elements are bits for bitstream formats, bytes otherwise.\n     */\n    uint16_t step_minus1  :3;\n\n    /**\n     * Number of elements before the component of the first pixel plus 1.\n     * Elements are bits for bitstream formats, bytes otherwise.\n     */\n    uint16_t offset_plus1 :3;\n    uint16_t shift        :3;            ///< number of least significant bits that must be shifted away to get the value\n    uint16_t depth_minus1 :4;            ///< number of bits in the component minus 1\n}AVComponentDescriptor;\n\n/**\n * Descriptor that unambiguously describes how the bits of a pixel are\n * stored in the up to 4 data planes of an image. 
It also stores the\n * subsampling factors and number of components.\n *\n * @note This is separate from the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV\n *       and all the YUV variants); AVPixFmtDescriptor just stores how values\n *       are stored, not what these values represent.\n */\ntypedef struct AVPixFmtDescriptor{\n    const char *name;\n    uint8_t nb_components;      ///< The number of components each pixel has (1-4)\n\n    /**\n     * Amount to shift the luma width right to find the chroma width.\n     * For YV12 this is 1 for example.\n     * chroma_width = -((-luma_width) >> log2_chroma_w)\n     * The note above is needed to ensure rounding up.\n     * This value only refers to the chroma components.\n     */\n    uint8_t log2_chroma_w;      ///< chroma_width = -((-luma_width )>>log2_chroma_w)\n\n    /**\n     * Amount to shift the luma height right to find the chroma height.\n     * For YV12 this is 1 for example.\n     * chroma_height= -((-luma_height) >> log2_chroma_h)\n     * The note above is needed to ensure rounding up.\n     * This value only refers to the chroma components.\n     */\n    uint8_t log2_chroma_h;\n    uint8_t flags;\n\n    /**\n     * Parameters that describe how pixels are packed.\n     * If the format has 2 or 4 components, then alpha is last.\n     * If the format has 1 or 2 components, then luma is 0.\n     * If the format has 3 or 4 components,\n     * if the RGB flag is set then 0 is red, 1 is green and 2 is blue;\n     * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.\n     */\n    AVComponentDescriptor comp[4];\n}AVPixFmtDescriptor;\n\n/**\n * Pixel format is big-endian.\n */\n#define AV_PIX_FMT_FLAG_BE           (1 << 0)\n/**\n * Pixel format has a palette in data[1], values are indexes in this palette.\n */\n#define AV_PIX_FMT_FLAG_PAL          (1 << 1)\n/**\n * All values of a component are bit-wise packed end to end.\n */\n#define AV_PIX_FMT_FLAG_BITSTREAM    (1 << 2)\n/**\n * Pixel format is an HW accelerated format.\n */\n#define AV_PIX_FMT_FLAG_HWACCEL      (1 << 3)\n/**\n * At least one pixel component is not in the first data plane.\n */\n#define AV_PIX_FMT_FLAG_PLANAR       (1 << 4)\n/**\n * The pixel format contains RGB-like data (as opposed to YUV/grayscale).\n */\n#define AV_PIX_FMT_FLAG_RGB          (1 << 5)\n/**\n * The pixel format is \"pseudo-paletted\". 
This means that FFmpeg treats it as\n * paletted internally, but the palette is generated by the decoder and is not\n * stored in the file.\n */\n#define AV_PIX_FMT_FLAG_PSEUDOPAL    (1 << 6)\n/**\n * The pixel format has an alpha channel.\n */\n#define AV_PIX_FMT_FLAG_ALPHA        (1 << 7)\n\n#if FF_API_PIX_FMT\n/**\n * @deprecated use the AV_PIX_FMT_FLAG_* flags\n */\n#define PIX_FMT_BE        AV_PIX_FMT_FLAG_BE\n#define PIX_FMT_PAL       AV_PIX_FMT_FLAG_PAL\n#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM\n#define PIX_FMT_HWACCEL   AV_PIX_FMT_FLAG_HWACCEL\n#define PIX_FMT_PLANAR    AV_PIX_FMT_FLAG_PLANAR\n#define PIX_FMT_RGB       AV_PIX_FMT_FLAG_RGB\n#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL\n#define PIX_FMT_ALPHA     AV_PIX_FMT_FLAG_ALPHA\n#endif\n\n#if FF_API_PIX_FMT_DESC\n/**\n * The array of all the pixel format descriptors.\n */\nextern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[];\n#endif\n\n/**\n * Read a line from an image, and write the values of the\n * pixel format component c to dst.\n *\n * @param data the array containing the pointers to the planes of the image\n * @param linesize the array containing the linesizes of the image\n * @param desc the pixel format descriptor for the image\n * @param x the horizontal coordinate of the first pixel to read\n * @param y the vertical coordinate of the first pixel to read\n * @param w the width of the line to read, that is the number of\n * values to write to dst\n * @param read_pal_component if not zero and the format is a paletted\n * format writes the values corresponding to the palette\n * component c in data[1] to dst, rather than the palette indexes in\n * data[0]. The behavior is undefined if the format is not paletted.\n */\nvoid av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4],\n                        const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component);\n\n/**\n * Write the values from src to the pixel format component c of an\n * image line.\n *\n * @param src array containing the values to write\n * @param data the array containing the pointers to the planes of the\n * image to write into. 
It is supposed to be zeroed.\n * @param linesize the array containing the linesizes of the image\n * @param desc the pixel format descriptor for the image\n * @param x the horizontal coordinate of the first pixel to write\n * @param y the vertical coordinate of the first pixel to write\n * @param w the width of the line to write, that is the number of\n * values to write to the image line\n */\nvoid av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4],\n                         const AVPixFmtDescriptor *desc, int x, int y, int c, int w);\n\n/**\n * Return the pixel format corresponding to name.\n *\n * If there is no pixel format with name name, then looks for a\n * pixel format with the name corresponding to the native endian\n * format of name.\n * For example in a little-endian system, first looks for \"gray16\",\n * then for \"gray16le\".\n *\n * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE.\n */\nenum AVPixelFormat av_get_pix_fmt(const char *name);\n\n/**\n * Return the short name for a pixel format, NULL in case pix_fmt is\n * unknown.\n *\n * @see av_get_pix_fmt(), av_get_pix_fmt_string()\n */\nconst char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt);\n\n/**\n * Print in buf the string corresponding to the pixel format with\n * number pix_fmt, or a header if pix_fmt is negative.\n *\n * @param buf the buffer where to write the string\n * @param buf_size the size of buf\n * @param pix_fmt the number of the pixel format to print the\n * corresponding info string, or a negative value to print the\n * corresponding header.\n */\nchar *av_get_pix_fmt_string (char *buf, int buf_size, enum AVPixelFormat pix_fmt);\n\n/**\n * Return the number of bits per pixel used by the pixel format\n * described by pixdesc. Note that this is not the same as the number\n * of bits per sample.\n *\n * The returned number of bits refers to the number of bits actually\n * used for storing the pixel information, that is padding bits are\n * not counted.\n */\nint av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);\n\n/**\n * Return the number of bits per pixel for the pixel format\n * described by pixdesc, including any padding or unused bits.\n */\nint av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);\n\n/**\n * @return a pixel format descriptor for provided pixel format or NULL if\n * this pixel format is unknown.\n */\nconst AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt);\n\n/**\n * Iterate over all pixel format descriptors known to libavutil.\n *\n * @param prev previous descriptor. 
NULL to get the first descriptor.\n *\n * @return next descriptor or NULL after the last descriptor\n */\nconst AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev);\n\n/**\n * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc\n * is not a valid pointer to a pixel format descriptor.\n */\nenum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);\n\n/**\n * Utility function to access log2_chroma_w and log2_chroma_h from\n * the pixel format AVPixFmtDescriptor.\n *\n * See avcodec_get_chroma_sub_sample() for a function that asserts a\n * valid pixel format instead of returning an error code.\n * It's recommended that you use avcodec_get_chroma_sub_sample unless\n * you do check the return code!\n *\n * @param[in]  pix_fmt the pixel format\n * @param[out] h_shift store log2_chroma_w\n * @param[out] v_shift store log2_chroma_h\n *\n * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format\n */\nint av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,\n                                     int *h_shift, int *v_shift);\n\n/**\n * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a\n * valid pixel format.\n */\nint av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt);\n\nvoid ff_check_pixfmt_descriptors(void);\n\n/**\n * Utility function to swap the endianness of a pixel format.\n *\n * @param[in]  pix_fmt the pixel format\n *\n * @return pixel format with swapped endianness if it exists,\n * otherwise AV_PIX_FMT_NONE\n */\nenum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);\n\n\n#endif /* AVUTIL_PIXDESC_H */\n"
  },
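/* A minimal sketch of the descriptor queries declared in the pixdesc.h above;
 * illustrative only, not part of the FFmpeg sources. */
#include <stdio.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    /* Look the format up by name, then fetch its descriptor. */
    enum AVPixelFormat fmt = av_get_pix_fmt("yuv420p");
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    int h_shift, v_shift;

    if (!desc)
        return 1;

    printf("%s: %d components, %d bits/pixel, %d plane(s)\n",
           av_get_pix_fmt_name(fmt), desc->nb_components,
           av_get_bits_per_pixel(desc), av_pix_fmt_count_planes(fmt));

    /* For yuv420p this reports width>>1, height>>1 (4:2:0 subsampling). */
    if (av_pix_fmt_get_chroma_sub_sample(fmt, &h_shift, &v_shift) == 0)
        printf("chroma: width >> %d, height >> %d\n", h_shift, v_shift);

    return 0;
}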
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/pixfmt.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_PIXFMT_H\n#define AVUTIL_PIXFMT_H\n\n/**\n * @file\n * pixel format definitions\n *\n */\n\n#include \"libavutil/avconfig.h\"\n#include \"version.h\"\n\n#define AVPALETTE_SIZE 1024\n#define AVPALETTE_COUNT 256\n\n/**\n * Pixel format.\n *\n * @note\n * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA\n * color is put together as:\n *  (A << 24) | (R << 16) | (G << 8) | B\n * This is stored as BGRA on little-endian CPU architectures and ARGB on\n * big-endian CPUs.\n *\n * @par\n * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized\n * image data is stored in AVFrame.data[0]. The palette is transported in\n * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is\n * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is\n * also endian-specific). Note also that the individual RGB palette\n * components stored in AVFrame.data[1] should be in the range 0..255.\n * This is important as many custom PAL8 video codecs that were designed\n * to run on the IBM VGA graphics adapter use 6-bit palette components.\n *\n * @par\n * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like\n * for pal8. 
This palette is filled in automatically by the function\n * allocating the picture.\n *\n * @note\n * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1\n * and that all newly added little-endian formats have (pix_fmt & 1) == 0.\n * This allows simpler detection of big vs little-endian.\n */\nenum AVPixelFormat {\n    AV_PIX_FMT_NONE = -1,\n    AV_PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)\n    AV_PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr\n    AV_PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...\n    AV_PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...\n    AV_PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)\n    AV_PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)\n    AV_PIX_FMT_YUV410P,   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)\n    AV_PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)\n    AV_PIX_FMT_GRAY8,     ///<        Y        ,  8bpp\n    AV_PIX_FMT_MONOWHITE, ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb\n    AV_PIX_FMT_MONOBLACK, ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb\n    AV_PIX_FMT_PAL8,      ///< 8 bit with PIX_FMT_RGB32 palette\n    AV_PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range\n    AV_PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range\n    AV_PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range\n#if FF_API_XVMC\n    AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing\n    AV_PIX_FMT_XVMC_MPEG2_IDCT,\n#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT\n#endif /* FF_API_XVMC */\n    AV_PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1\n    AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3\n    AV_PIX_FMT_BGR8,      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)\n    AV_PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits\n    AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)\n    AV_PIX_FMT_RGB8,      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)\n    AV_PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits\n    AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)\n    AV_PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)\n    AV_PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped\n\n    AV_PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...\n    AV_PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...\n    AV_PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...\n    AV_PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...\n\n    AV_PIX_FMT_GRAY16BE,  ///<        Y        , 16bpp, big-endian\n    AV_PIX_FMT_GRAY16LE,  ///<        Y        , 16bpp, little-endian\n    
AV_PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)\n    AV_PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range\n    AV_PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)\n#if FF_API_VDPAU\n    AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    AV_PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian\n    AV_PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian\n\n    AV_PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), big-endian\n    AV_PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), little-endian\n    AV_PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0\n    AV_PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0\n\n    AV_PIX_FMT_BGR565BE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), big-endian\n    AV_PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), little-endian\n    AV_PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1\n    AV_PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1\n\n    AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers\n    AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers\n    AV_PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n\n    AV_PIX_FMT_YUV420P16LE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV420P16BE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV422P16LE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    
AV_PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n#if FF_API_VDPAU\n    AV_PIX_FMT_VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    AV_PIX_FMT_DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer\n\n    AV_PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0\n    AV_PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0\n    AV_PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1\n    AV_PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1\n    AV_PIX_FMT_GRAY8A,    ///< 8bit gray, 8bit alpha\n    AV_PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian\n    AV_PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian\n\n    /**\n     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.\n     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.\n     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.\n     */\n    AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_VDA_VLD,    ///< hardware decoding through VDA\n\n#ifdef AV_PIX_FMT_ABI_GIT_MASTER\n    AV_PIX_FMT_RGBA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is 
stored as big-endian\n    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    AV_PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp\n    AV_PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big-endian\n    AV_PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little-endian\n    AV_PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big-endian\n    AV_PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little-endian\n    AV_PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big-endian\n    AV_PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little-endian\n\n    /**\n     * duplicated pixel formats for compatibility with libav.\n     * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)\n     * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85)\n     */\n    AV_PIX_FMT_YUVA422P_LIBAV,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)\n    AV_PIX_FMT_YUVA444P_LIBAV,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)\n\n    AV_PIX_FMT_YUVA420P9BE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian\n    AV_PIX_FMT_YUVA420P9LE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian\n    AV_PIX_FMT_YUVA422P9BE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian\n    AV_PIX_FMT_YUVA422P9LE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian\n    AV_PIX_FMT_YUVA444P9BE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian\n    AV_PIX_FMT_YUVA444P9LE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian\n    AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)\n\n    AV_PIX_FMT_VDPAU,     ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface\n\n    AV_PIX_FMT_XYZ12LE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0\n    AV_PIX_FMT_XYZ12BE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 
2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0\n    AV_PIX_FMT_NV16,         ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)\n    AV_PIX_FMT_NV20LE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_NV20BE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n\n#ifndef AV_PIX_FMT_ABI_GIT_MASTER\n    AV_PIX_FMT_RGBA64BE=0x123,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    AV_PIX_FMT_0RGB=0x123+4,      ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...\n    AV_PIX_FMT_RGB0,      ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...\n    AV_PIX_FMT_0BGR,      ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...\n    AV_PIX_FMT_BGR0,      ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...\n    AV_PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)\n    AV_PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)\n\n    AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big-endian\n    AV_PIX_FMT_GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little-endian\n    AV_PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big-endian\n    AV_PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little-endian\n    AV_PIX_FMT_GBRAP,       ///< planar GBRA 4:4:4:4 32bpp\n    AV_PIX_FMT_GBRAP16BE,   ///< planar GBRA 4:4:4:4 64bpp, big-endian\n    AV_PIX_FMT_GBRAP16LE,   ///< planar GBRA 4:4:4:4 64bpp, little-endian\n    AV_PIX_FMT_YUVJ411P,    ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range\n\n 
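   /*\n     * Usage sketch (illustrative, not part of this header): a pixel format\n     * value can be inspected with the helpers declared in libavutil/pixdesc.h:\n     *\n     *     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);\n     *     const char *name = av_get_pix_fmt_name(AV_PIX_FMT_YUV420P); // \"yuv420p\"\n     */\n 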
   AV_PIX_FMT_BAYER_BGGR8,    ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples\n    AV_PIX_FMT_BAYER_RGGB8,    ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples\n    AV_PIX_FMT_BAYER_GBRG8,    ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples\n    AV_PIX_FMT_BAYER_GRBG8,    ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples\n    AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian\n    AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian\n    AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian\n    AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian\n    AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian\n    AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian\n    AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian\n    AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian\n#if !FF_API_XVMC\n    AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing\n#endif /* !FF_API_XVMC */\n\n    AV_PIX_FMT_NB,        ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions\n\n#if FF_API_PIX_FMT\n#include \"old_pix_fmts.h\"\n#endif\n};\n\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI\n#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV\n#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV\n#endif\n\n\n#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A\n#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be\n#else\n#   define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le\n#endif\n\n#define AV_PIX_FMT_RGB32   AV_PIX_FMT_NE(ARGB, BGRA)\n#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)\n#define AV_PIX_FMT_BGR32   AV_PIX_FMT_NE(ABGR, RGBA)\n#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)\n#define AV_PIX_FMT_0RGB32  AV_PIX_FMT_NE(0RGB, BGR0)\n#define AV_PIX_FMT_0BGR32  AV_PIX_FMT_NE(0BGR, RGB0)\n\n#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)\n#define AV_PIX_FMT_RGB48  AV_PIX_FMT_NE(RGB48BE,  RGB48LE)\n#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)\n#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)\n#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)\n#define AV_PIX_FMT_BGR48  AV_PIX_FMT_NE(BGR48BE,  BGR48LE)\n#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)\n#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)\n#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)\n\n#define AV_PIX_FMT_YUV420P9  AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)\n#define AV_PIX_FMT_YUV422P9  AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)\n#define AV_PIX_FMT_YUV444P9  AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)\n#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)\n#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)\n#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)\n#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)\n#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)\n#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)\n#define 
AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)\n#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)\n#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)\n#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)\n#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)\n#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)\n\n#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)\n#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)\n#define AV_PIX_FMT_GBRP9     AV_PIX_FMT_NE(GBRP9BE ,    GBRP9LE)\n#define AV_PIX_FMT_GBRP10    AV_PIX_FMT_NE(GBRP10BE,    GBRP10LE)\n#define AV_PIX_FMT_GBRP12    AV_PIX_FMT_NE(GBRP12BE,    GBRP12LE)\n#define AV_PIX_FMT_GBRP14    AV_PIX_FMT_NE(GBRP14BE,    GBRP14LE)\n#define AV_PIX_FMT_GBRP16    AV_PIX_FMT_NE(GBRP16BE,    GBRP16LE)\n#define AV_PIX_FMT_GBRAP16   AV_PIX_FMT_NE(GBRAP16BE,   GBRAP16LE)\n\n#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE,    BAYER_BGGR16LE)\n#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE,    BAYER_RGGB16LE)\n#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE,    BAYER_GBRG16LE)\n#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE,    BAYER_GRBG16LE)\n\n\n#define AV_PIX_FMT_YUVA420P9  AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)\n#define AV_PIX_FMT_YUVA422P9  AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)\n#define AV_PIX_FMT_YUVA444P9  AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)\n#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)\n#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)\n#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)\n#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)\n#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)\n#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)\n\n#define AV_PIX_FMT_XYZ12      AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)\n#define AV_PIX_FMT_NV20       AV_PIX_FMT_NE(NV20BE,  NV20LE)\n\n#if FF_API_PIX_FMT\n#define PixelFormat AVPixelFormat\n\n#define PIX_FMT_Y400A AV_PIX_FMT_Y400A\n#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P\n\n#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le)\n\n#define PIX_FMT_RGB32   AV_PIX_FMT_RGB32\n#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1\n#define PIX_FMT_BGR32   AV_PIX_FMT_BGR32\n#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1\n#define PIX_FMT_0RGB32  AV_PIX_FMT_0RGB32\n#define PIX_FMT_0BGR32  AV_PIX_FMT_0BGR32\n\n#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16\n#define PIX_FMT_RGB48  AV_PIX_FMT_RGB48\n#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565\n#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555\n#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444\n#define PIX_FMT_BGR48  AV_PIX_FMT_BGR48\n#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565\n#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555\n#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444\n\n#define PIX_FMT_YUV420P9  AV_PIX_FMT_YUV420P9\n#define PIX_FMT_YUV422P9  AV_PIX_FMT_YUV422P9\n#define PIX_FMT_YUV444P9  AV_PIX_FMT_YUV444P9\n#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10\n#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10\n#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10\n#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12\n#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12\n#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12\n#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14\n#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14\n#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14\n#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16\n#define PIX_FMT_YUV422P16 
AV_PIX_FMT_YUV422P16\n#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16\n\n#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64\n#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64\n#define PIX_FMT_GBRP9  AV_PIX_FMT_GBRP9\n#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10\n#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12\n#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14\n#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16\n#endif\n\n#endif /* AVUTIL_PIXFMT_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/random_seed.h",
    "content": "/*\n * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_RANDOM_SEED_H\n#define AVUTIL_RANDOM_SEED_H\n\n#include <stdint.h>\n/**\n * @addtogroup lavu_crypto\n * @{\n */\n\n/**\n * Get a seed to use in conjunction with random functions.\n * This function tries to provide a good seed at a best effort bases.\n * Its possible to call this function multiple times if more bits are needed.\n * It can be quite slow, which is why it should only be used as seed for a faster\n * PRNG. The quality of the seed depends on the platform.\n */\nuint32_t av_get_random_seed(void);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_RANDOM_SEED_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/rational.h",
    "content": "/*\n * rational numbers\n * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * rational numbers\n * @author Michael Niedermayer <michaelni@gmx.at>\n */\n\n#ifndef AVUTIL_RATIONAL_H\n#define AVUTIL_RATIONAL_H\n\n#include <stdint.h>\n#include <limits.h>\n#include \"attributes.h\"\n\n/**\n * @addtogroup lavu_math\n * @{\n */\n\n/**\n * rational number numerator/denominator\n */\ntypedef struct AVRational{\n    int num; ///< numerator\n    int den; ///< denominator\n} AVRational;\n\n/**\n * Create a rational.\n * Useful for compilers that do not support compound literals.\n * @note  The return value is not reduced.\n */\nstatic inline AVRational av_make_q(int num, int den)\n{\n    AVRational r = { num, den };\n    return r;\n}\n\n/**\n * Compare two rationals.\n * @param a first rational\n * @param b second rational\n * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the\n * values is of the form 0/0\n */\nstatic inline int av_cmp_q(AVRational a, AVRational b){\n    const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;\n\n    if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;\n    else if(b.den && a.den) return 0;\n    else if(a.num && b.num) return (a.num>>31) - (b.num>>31);\n    else                    return INT_MIN;\n}\n\n/**\n * Convert rational to double.\n * @param a rational to convert\n * @return (double) a\n */\nstatic inline double av_q2d(AVRational a){\n    return a.num / (double) a.den;\n}\n\n/**\n * Reduce a fraction.\n * This is useful for framerate calculations.\n * @param dst_num destination numerator\n * @param dst_den destination denominator\n * @param num source numerator\n * @param den source denominator\n * @param max the maximum allowed for dst_num & dst_den\n * @return 1 if exact, 0 otherwise\n */\nint av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);\n\n/**\n * Multiply two rationals.\n * @param b first rational\n * @param c second rational\n * @return b*c\n */\nAVRational av_mul_q(AVRational b, AVRational c) av_const;\n\n/**\n * Divide one rational by another.\n * @param b first rational\n * @param c second rational\n * @return b/c\n */\nAVRational av_div_q(AVRational b, AVRational c) av_const;\n\n/**\n * Add two rationals.\n * @param b first rational\n * @param c second rational\n * @return b+c\n */\nAVRational av_add_q(AVRational b, AVRational c) av_const;\n\n/**\n * Subtract one rational from another.\n * @param b first rational\n * @param c second rational\n * @return b-c\n */\nAVRational av_sub_q(AVRational b, AVRational c) av_const;\n\n/**\n * Invert a rational.\n * @param q value\n * @return 1 / q\n */\nstatic av_always_inline AVRational av_inv_q(AVRational q)\n{\n    AVRational r = { q.den, q.num 
};\n    return r;\n}\n\n/**\n * Convert a double precision floating point number to a rational.\n * inf is expressed as {1,0} or {-1,0} depending on the sign.\n *\n * @param d double to convert\n * @param max the maximum allowed numerator and denominator\n * @return (AVRational) d\n */\nAVRational av_d2q(double d, int max) av_const;\n\n/**\n * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer\n * than q1, 0 if they have the same distance.\n */\nint av_nearer_q(AVRational q, AVRational q1, AVRational q2);\n\n/**\n * Find the nearest value in q_list to q.\n * @param q_list an array of rationals terminated by {0, 0}\n * @return the index of the nearest value found in the array\n */\nint av_find_nearest_q_idx(AVRational q, const AVRational* q_list);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_RATIONAL_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/ripemd.h",
    "content": "/*\n * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>\n * Copyright (C) 2013 James Almer <jamrial@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_RIPEMD_H\n#define AVUTIL_RIPEMD_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_ripemd RIPEMD\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_ripemd_size;\n\nstruct AVRIPEMD;\n\n/**\n * Allocate an AVRIPEMD context.\n */\nstruct AVRIPEMD *av_ripemd_alloc(void);\n\n/**\n * Initialize RIPEMD hashing.\n *\n * @param context pointer to the function context (of size av_ripemd_size)\n * @param bits    number of bits in digest (128, 160, 256 or 320 bits)\n * @return        zero if initialization succeeded, -1 otherwise\n */\nint av_ripemd_init(struct AVRIPEMD* context, int bits);\n\n/**\n * Update hash value.\n *\n * @param context hash function context\n * @param data    input data to update hash with\n * @param len     input data length\n */\nvoid av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param context hash function context\n * @param digest  buffer where output digest value is stored\n */\nvoid av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_RIPEMD_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/samplefmt.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_SAMPLEFMT_H\n#define AVUTIL_SAMPLEFMT_H\n\n#include <stdint.h>\n\n#include \"avutil.h\"\n#include \"attributes.h\"\n\n/**\n * Audio Sample Formats\n *\n * @par\n * The data described by the sample format is always in native-endian order.\n * Sample values can be expressed by native C types, hence the lack of a signed\n * 24-bit sample format even though it is a common raw audio data format.\n *\n * @par\n * The floating-point formats are based on full volume being in the range\n * [-1.0, 1.0]. Any values outside this range are beyond full volume level.\n *\n * @par\n * The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg\n * (such as AVFrame in libavcodec) is as follows:\n *\n * For planar sample formats, each audio channel is in a separate data plane,\n * and linesize is the buffer size, in bytes, for a single plane. All data\n * planes must be the same size. For packed sample formats, only the first data\n * plane is used, and samples for each channel are interleaved. In this case,\n * linesize is the buffer size, in bytes, for the 1 plane.\n */\nenum AVSampleFormat {\n    AV_SAMPLE_FMT_NONE = -1,\n    AV_SAMPLE_FMT_U8,          ///< unsigned 8 bits\n    AV_SAMPLE_FMT_S16,         ///< signed 16 bits\n    AV_SAMPLE_FMT_S32,         ///< signed 32 bits\n    AV_SAMPLE_FMT_FLT,         ///< float\n    AV_SAMPLE_FMT_DBL,         ///< double\n\n    AV_SAMPLE_FMT_U8P,         ///< unsigned 8 bits, planar\n    AV_SAMPLE_FMT_S16P,        ///< signed 16 bits, planar\n    AV_SAMPLE_FMT_S32P,        ///< signed 32 bits, planar\n    AV_SAMPLE_FMT_FLTP,        ///< float, planar\n    AV_SAMPLE_FMT_DBLP,        ///< double, planar\n\n    AV_SAMPLE_FMT_NB           ///< Number of sample formats. DO NOT USE if linking dynamically\n};\n\n/**\n * Return the name of sample_fmt, or NULL if sample_fmt is not\n * recognized.\n */\nconst char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);\n\n/**\n * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE\n * on error.\n */\nenum AVSampleFormat av_get_sample_fmt(const char *name);\n\n/**\n * Return the planar<->packed alternative form of the given sample format, or\n * AV_SAMPLE_FMT_NONE on error. 
If the passed sample_fmt is already in the\n * requested planar/packed format, the format returned is the same as the\n * input.\n */\nenum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar);\n\n/**\n * Get the packed alternative form of the given sample format.\n *\n * If the passed sample_fmt is already in packed format, the format returned is\n * the same as the input.\n *\n * @return  the packed alternative form of the given sample format or\n *          AV_SAMPLE_FMT_NONE on error.\n */\nenum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);\n\n/**\n * Get the planar alternative form of the given sample format.\n *\n * If the passed sample_fmt is already in planar format, the format returned is\n * the same as the input.\n *\n * @return  the planar alternative form of the given sample format or\n *          AV_SAMPLE_FMT_NONE on error.\n */\nenum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);\n\n/**\n * Generate a string corresponding to the sample format\n * sample_fmt, or a header if sample_fmt is negative.\n *\n * @param buf the buffer where to write the string\n * @param buf_size the size of buf\n * @param sample_fmt the number of the sample format to print the\n * corresponding info string, or a negative value to print the\n * corresponding header.\n * @return the pointer to the filled buffer or NULL if sample_fmt is\n * unknown or in case of other errors\n */\nchar *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);\n\n#if FF_API_GET_BITS_PER_SAMPLE_FMT\n/**\n * @deprecated Use av_get_bytes_per_sample() instead.\n */\nattribute_deprecated\nint av_get_bits_per_sample_fmt(enum AVSampleFormat sample_fmt);\n#endif\n\n/**\n * Return number of bytes per sample.\n *\n * @param sample_fmt the sample format\n * @return number of bytes per sample or zero if unknown for the given\n * sample format\n */\nint av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);\n\n/**\n * Check if the sample format is planar.\n *\n * @param sample_fmt the sample format to inspect\n * @return 1 if the sample format is planar, 0 if it is interleaved\n */\nint av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);\n\n/**\n * Get the required buffer size for the given audio parameters.\n *\n * @param[out] linesize calculated linesize, may be NULL\n * @param nb_channels   the number of channels\n * @param nb_samples    the number of samples in a single channel\n * @param sample_fmt    the sample format\n * @param align         buffer size alignment (0 = default, 1 = no alignment)\n * @return              required buffer size, or negative error code on failure\n */\nint av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,\n                               enum AVSampleFormat sample_fmt, int align);\n\n/**\n * Fill plane data pointers and linesize for samples with sample\n * format sample_fmt.\n *\n * The audio_data array is filled with the pointers to the samples data planes:\n * for planar, set the start point of each channel's data within the buffer,\n * for packed, set the start point of the entire buffer only.\n *\n * The value pointed to by linesize is set to the aligned size of each\n * channel's data buffer for planar layout, or to the aligned size of the\n * buffer for all channels for packed layout.\n *\n * The buffer in buf must be big enough to contain all the samples\n * (use av_samples_get_buffer_size() to compute its minimum size),\n * otherwise the audio_data 
pointers will point to invalid data.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param[out] audio_data  array to be filled with the pointer for each channel\n * @param[out] linesize    calculated linesize, may be NULL\n * @param buf              the pointer to a buffer containing the samples\n * @param nb_channels      the number of channels\n * @param nb_samples       the number of samples in a single channel\n * @param sample_fmt       the sample format\n * @param align            buffer size alignment (0 = default, 1 = no alignment)\n * @return                 >=0 on success or a negative error code on failure\n * @todo return minimum size in bytes required for the buffer in case\n * of success at the next bump\n */\nint av_samples_fill_arrays(uint8_t **audio_data, int *linesize,\n                           const uint8_t *buf,\n                           int nb_channels, int nb_samples,\n                           enum AVSampleFormat sample_fmt, int align);\n\n/**\n * Allocate a samples buffer for nb_samples samples, and fill data pointers and\n * linesize accordingly.\n * The allocated samples buffer can be freed by using av_freep(&audio_data[0])\n * Allocated data will be initialized to silence.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param[out] audio_data  array to be filled with the pointer for each channel\n * @param[out] linesize    aligned size for audio buffer(s), may be NULL\n * @param nb_channels      number of audio channels\n * @param nb_samples       number of samples per channel\n * @param align            buffer size alignment (0 = default, 1 = no alignment)\n * @return                 >=0 on success or a negative error code on failure\n * @todo return the size of the allocated buffer in case of success at the next bump\n * @see av_samples_fill_arrays()\n * @see av_samples_alloc_array_and_samples()\n */\nint av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,\n                     int nb_samples, enum AVSampleFormat sample_fmt, int align);\n\n/**\n * Allocate a data pointers array, samples buffer for nb_samples\n * samples, and fill data pointers and linesize accordingly.\n *\n * This is the same as av_samples_alloc(), but also allocates the data\n * pointers array.\n *\n * @see av_samples_alloc()\n */\nint av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,\n                                       int nb_samples, enum AVSampleFormat sample_fmt, int align);\n\n/**\n * Copy samples from src to dst.\n *\n * @param dst destination array of pointers to data planes\n * @param src source array of pointers to data planes\n * @param dst_offset offset in samples at which the data will be written to dst\n * @param src_offset offset in samples at which the data will be read from src\n * @param nb_samples number of samples to be copied\n * @param nb_channels number of audio channels\n * @param sample_fmt audio sample format\n */\nint av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,\n                    int src_offset, int nb_samples, int nb_channels,\n                    enum AVSampleFormat sample_fmt);\n\n/**\n * Fill an audio buffer with silence.\n *\n * @param audio_data  array of pointers to data planes\n * @param offset      offset in samples at which to start filling\n * @param nb_samples  number of samples to fill\n * @param nb_channels number of audio channels\n * 
@param sample_fmt  audio sample format\n */\nint av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,\n                           int nb_channels, enum AVSampleFormat sample_fmt);\n\n#endif /* AVUTIL_SAMPLEFMT_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/sha.h",
    "content": "/*\n * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_SHA_H\n#define AVUTIL_SHA_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_sha SHA\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_sha_size;\n\nstruct AVSHA;\n\n/**\n * Allocate an AVSHA context.\n */\nstruct AVSHA *av_sha_alloc(void);\n\n/**\n * Initialize SHA-1 or SHA-2 hashing.\n *\n * @param context pointer to the function context (of size av_sha_size)\n * @param bits    number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits)\n * @return        zero if initialization succeeded, -1 otherwise\n */\nint av_sha_init(struct AVSHA* context, int bits);\n\n/**\n * Update hash value.\n *\n * @param context hash function context\n * @param data    input data to update hash with\n * @param len     input data length\n */\nvoid av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param context hash function context\n * @param digest  buffer where output digest value is stored\n */\nvoid av_sha_final(struct AVSHA* context, uint8_t *digest);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_SHA_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/sha512.h",
    "content": "/*\n * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>\n * Copyright (C) 2013 James Almer <jamrial@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_SHA512_H\n#define AVUTIL_SHA512_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_sha512 SHA512\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_sha512_size;\n\nstruct AVSHA512;\n\n/**\n * Allocate an AVSHA512 context.\n */\nstruct AVSHA512 *av_sha512_alloc(void);\n\n/**\n * Initialize SHA-2 512 hashing.\n *\n * @param context pointer to the function context (of size av_sha512_size)\n * @param bits    number of bits in digest (224, 256, 384 or 512 bits)\n * @return        zero if initialization succeeded, -1 otherwise\n */\nint av_sha512_init(struct AVSHA512* context, int bits);\n\n/**\n * Update hash value.\n *\n * @param context hash function context\n * @param data    input data to update hash with\n * @param len     input data length\n */\nvoid av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param context hash function context\n * @param digest  buffer where output digest value is stored\n */\nvoid av_sha512_final(struct AVSHA512* context, uint8_t *digest);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_SHA512_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/stereo3d.h",
    "content": "/*\n * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#include <stdint.h>\n\n#include \"frame.h\"\n\n/**\n * List of possible 3D Types\n */\nenum AVStereo3DType {\n    /**\n     * Video is not stereoscopic (and metadata has to be there).\n     */\n    AV_STEREO3D_2D,\n\n    /**\n     * Views are next to each other.\n     *\n     *    LLLLRRRR\n     *    LLLLRRRR\n     *    LLLLRRRR\n     *    ...\n     */\n    AV_STEREO3D_SIDEBYSIDE,\n\n    /**\n     * Views are on top of each other.\n     *\n     *    LLLLLLLL\n     *    LLLLLLLL\n     *    RRRRRRRR\n     *    RRRRRRRR\n     */\n    AV_STEREO3D_TOPBOTTOM,\n\n    /**\n     * Views are alternated temporally.\n     *\n     *     frame0   frame1   frame2   ...\n     *    LLLLLLLL RRRRRRRR LLLLLLLL\n     *    LLLLLLLL RRRRRRRR LLLLLLLL\n     *    LLLLLLLL RRRRRRRR LLLLLLLL\n     *    ...      ...      ...\n     */\n    AV_STEREO3D_FRAMESEQUENCE,\n\n    /**\n     * Views are packed in a checkerboard-like structure per pixel.\n     *\n     *    LRLRLRLR\n     *    RLRLRLRL\n     *    LRLRLRLR\n     *    ...\n     */\n    AV_STEREO3D_CHECKERBOARD,\n\n    /**\n     * Views are next to each other, but when upscaling\n     * apply a checkerboard pattern.\n     *\n     *     LLLLRRRR          L L L L    R R R R\n     *     LLLLRRRR    =>     L L L L  R R R R\n     *     LLLLRRRR          L L L L    R R R R\n     *     LLLLRRRR           L L L L  R R R R\n     */\n    AV_STEREO3D_SIDEBYSIDE_QUINCUNX,\n\n    /**\n     * Views are packed per line, as if interlaced.\n     *\n     *    LLLLLLLL\n     *    RRRRRRRR\n     *    LLLLLLLL\n     *    ...\n     */\n    AV_STEREO3D_LINES,\n\n    /**\n     * Views are packed per column.\n     *\n     *    LRLRLRLR\n     *    LRLRLRLR\n     *    LRLRLRLR\n     *    ...\n     */\n    AV_STEREO3D_COLUMNS,\n};\n\n\n/**\n * Inverted views, Right/Bottom represents the left view.\n */\n#define AV_STEREO3D_FLAG_INVERT     (1 << 0)\n\n/**\n * Stereo 3D type: this structure describes how two videos are packed\n * within a single video surface, with additional information as needed.\n *\n * @note The struct must be allocated with av_stereo3d_alloc() and\n *       its size is not a part of the public ABI.\n */\ntypedef struct AVStereo3D {\n    /**\n     * How views are packed within the video.\n     */\n    enum AVStereo3DType type;\n\n    /**\n     * Additional information about the frame packing.\n     */\n    int flags;\n} AVStereo3D;\n\n/**\n * Allocate an AVStereo3D structure and set its fields to default values.\n * The resulting struct can be freed using av_freep().\n *\n * @return An AVStereo3D filled with default values or NULL on failure.\n */\nAVStereo3D *av_stereo3d_alloc(void);\n\n/**\n * Allocate a complete 
AVFrameSideData and add it to the frame.\n *\n * @param frame The frame to which the side data is added.\n *\n * @return The AVStereo3D structure to be filled by the caller.\n */\nAVStereo3D *av_stereo3d_create_side_data(AVFrame *frame);\n\n#endif /* AVUTIL_STEREO3D_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/time.h",
    "content": "/*\n * Copyright (c) 2000-2003 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_TIME_H\n#define AVUTIL_TIME_H\n\n#include <stdint.h>\n\n/**\n * Get the current time in microseconds.\n */\nint64_t av_gettime(void);\n\n/**\n * Sleep for a period of time.  Although the duration is expressed in\n * microseconds, the actual delay may be rounded to the precision of the\n * system timer.\n *\n * @param  usec Number of microseconds to sleep.\n * @return zero on success or (negative) error code.\n */\nint av_usleep(unsigned usec);\n\n#endif /* AVUTIL_TIME_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/timecode.h",
    "content": "/*\n * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com>\n * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Timecode helpers header\n */\n\n#ifndef AVUTIL_TIMECODE_H\n#define AVUTIL_TIMECODE_H\n\n#include <stdint.h>\n#include \"rational.h\"\n\n#define AV_TIMECODE_STR_SIZE 16\n\nenum AVTimecodeFlag {\n    AV_TIMECODE_FLAG_DROPFRAME      = 1<<0, ///< timecode is drop frame\n    AV_TIMECODE_FLAG_24HOURSMAX     = 1<<1, ///< timecode wraps after 24 hours\n    AV_TIMECODE_FLAG_ALLOWNEGATIVE  = 1<<2, ///< negative time values are allowed\n};\n\ntypedef struct {\n    int start;          ///< timecode frame start (first base frame number)\n    uint32_t flags;     ///< flags such as drop frame, +24 hours support, ...\n    AVRational rate;    ///< frame rate in rational form\n    unsigned fps;       ///< frame per second; must be consistent with the rate field\n} AVTimecode;\n\n/**\n * Adjust frame number for NTSC drop frame time code.\n *\n * @param framenum frame number to adjust\n * @param fps      frame per second, 30 or 60\n * @return         adjusted frame number\n * @warning        adjustment is only valid in NTSC 29.97 and 59.94\n */\nint av_timecode_adjust_ntsc_framenum2(int framenum, int fps);\n\n/**\n * Convert frame number to SMPTE 12M binary representation.\n *\n * @param tc       timecode data correctly initialized\n * @param framenum frame number\n * @return         the SMPTE binary representation\n *\n * @note Frame number adjustment is automatically done in case of drop timecode,\n *       you do NOT have to call av_timecode_adjust_ntsc_framenum2().\n * @note The frame number is relative to tc->start.\n * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity\n *       correction (PC) bits are set to zero.\n */\nuint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum);\n\n/**\n * Load timecode string in buf.\n *\n * @param buf      destination buffer, must be at least AV_TIMECODE_STR_SIZE long\n * @param tc       timecode data correctly initialized\n * @param framenum frame number\n * @return         the buf parameter\n *\n * @note Timecode representation can be a negative timecode and have more than\n *       24 hours, but will only be honored if the flags are correctly set.\n * @note The frame number is relative to tc->start.\n */\nchar *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum);\n\n/**\n * Get the timecode string from the SMPTE timecode format.\n *\n * @param buf        destination buffer, must be at least AV_TIMECODE_STR_SIZE long\n * @param tcsmpte    the 32-bit SMPTE timecode\n * @param prevent_df prevent the use of a drop flag when 
it is known that the DF bit\n *                   is arbitrary\n * @return           the buf parameter\n */\nchar *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df);\n\n/**\n * Get the timecode string from the 25-bit timecode format (MPEG GOP format).\n *\n * @param buf     destination buffer, must be at least AV_TIMECODE_STR_SIZE long\n * @param tc25bit the 25-bit timecode\n * @return        the buf parameter\n */\nchar *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit);\n\n/**\n * Initialize a timecode struct with the passed parameters.\n *\n * @param log_ctx     a pointer to an arbitrary struct of which the first field\n *                    is a pointer to an AVClass struct (used for av_log)\n * @param tc          pointer to an allocated AVTimecode\n * @param rate        frame rate in rational form\n * @param flags       miscellaneous flags such as drop frame, +24 hours, ...\n *                    (see AVTimecodeFlag)\n * @param frame_start the first frame number\n * @return            0 on success, AVERROR otherwise\n */\nint av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx);\n\n/**\n * Parse timecode representation (hh:mm:ss[:;.]ff).\n *\n * @param log_ctx a pointer to an arbitrary struct of which the first field is a\n *                pointer to an AVClass struct (used for av_log).\n * @param tc      pointer to an allocated AVTimecode\n * @param rate    frame rate in rational form\n * @param str     timecode string which will determine the frame start\n * @return        0 on success, AVERROR otherwise\n */\nint av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx);\n\n/**\n * Check if the timecode feature is available for the given frame rate.\n *\n * @return 0 if supported, <0 otherwise\n */\nint av_timecode_check_frame_rate(AVRational rate);\n\n#endif /* AVUTIL_TIMECODE_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/timestamp.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * timestamp utils, mostly useful for debugging/logging purposes\n */\n\n#ifndef AVUTIL_TIMESTAMP_H\n#define AVUTIL_TIMESTAMP_H\n\n#include \"common.h\"\n\n#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64)\n#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS\n#endif\n\n#define AV_TS_MAX_STRING_SIZE 32\n\n/**\n * Fill the provided buffer with a string containing a timestamp\n * representation.\n *\n * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE\n * @param ts the timestamp to represent\n * @return the buffer in input\n */\nstatic inline char *av_ts_make_string(char *buf, int64_t ts)\n{\n    if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, \"NOPTS\");\n    else                      snprintf(buf, AV_TS_MAX_STRING_SIZE, \"%\"PRId64, ts);\n    return buf;\n}\n\n/**\n * Convenience macro, the return value should be used only directly in\n * function arguments but never stand-alone.\n */\n#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts)\n\n/**\n * Fill the provided buffer with a string containing a timestamp time\n * representation.\n *\n * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE\n * @param ts the timestamp to represent\n * @param tb the timebase of the timestamp\n * @return the buffer in input\n */\nstatic inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb)\n{\n    if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, \"NOPTS\");\n    else                      snprintf(buf, AV_TS_MAX_STRING_SIZE, \"%.6g\", av_q2d(*tb) * ts);\n    return buf;\n}\n\n/**\n * Convenience macro, the return value should be used only directly in\n * function arguments but never stand-alone.\n */\n#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb)\n\n#endif /* AVUTIL_TIMESTAMP_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/version.h",
    "content": "/*\n * copyright (c) 2003 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_VERSION_H\n#define AVUTIL_VERSION_H\n\n#include \"macros.h\"\n\n/**\n * @defgroup version_utils Library Version Macros\n *\n * Useful to check and match library version in order to maintain\n * backward compatibility.\n *\n * @{\n */\n\n#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c)\n#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c\n#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)\n\n/**\n * @}\n */\n\n/**\n * @file\n * @ingroup lavu\n * Libavutil version macros\n */\n\n/**\n * @defgroup lavu_ver Version and Build diagnostics\n *\n * Macros and function useful to check at compiletime and at runtime\n * which version of libavutil is in use.\n *\n * @{\n */\n\n#define LIBAVUTIL_VERSION_MAJOR  52\n#define LIBAVUTIL_VERSION_MINOR  66\n#define LIBAVUTIL_VERSION_MICRO 100\n\n#define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \\\n                                               LIBAVUTIL_VERSION_MINOR, \\\n                                               LIBAVUTIL_VERSION_MICRO)\n#define LIBAVUTIL_VERSION       AV_VERSION(LIBAVUTIL_VERSION_MAJOR,     \\\n                                           LIBAVUTIL_VERSION_MINOR,     \\\n                                           LIBAVUTIL_VERSION_MICRO)\n#define LIBAVUTIL_BUILD         LIBAVUTIL_VERSION_INT\n\n#define LIBAVUTIL_IDENT         \"Lavu\" AV_STRINGIFY(LIBAVUTIL_VERSION)\n\n/**\n * @}\n *\n * @defgroup depr_guards Deprecation guards\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. 
The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n *\n * @{\n */\n\n#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT\n#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_FIND_OPT\n#define FF_API_FIND_OPT                 (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_OLD_AVOPTIONS\n#define FF_API_OLD_AVOPTIONS            (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_PIX_FMT\n#define FF_API_PIX_FMT                  (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_CONTEXT_SIZE\n#define FF_API_CONTEXT_SIZE             (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_PIX_FMT_DESC\n#define FF_API_PIX_FMT_DESC             (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_AV_REVERSE\n#define FF_API_AV_REVERSE               (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_AUDIOCONVERT\n#define FF_API_AUDIOCONVERT             (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_CPU_FLAG_MMX2\n#define FF_API_CPU_FLAG_MMX2            (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_SAMPLES_UTILS_RETURN_ZERO\n#define FF_API_SAMPLES_UTILS_RETURN_ZERO (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_LLS_PRIVATE\n#define FF_API_LLS_PRIVATE              (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_LLS1\n#define FF_API_LLS1                     (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_AVFRAME_LAVC\n#define FF_API_AVFRAME_LAVC             (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_VDPAU\n#define FF_API_VDPAU                    (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT\n#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_OLD_OPENCL\n#define FF_API_OLD_OPENCL               (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_XVMC\n#define FF_API_XVMC                     (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_INTFLOAT\n#define FF_API_INTFLOAT                 (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n#ifndef FF_API_OPT_TYPE_METADATA\n#define FF_API_OPT_TYPE_METADATA        (LIBAVUTIL_VERSION_MAJOR < 54)\n#endif\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_VERSION_H */\n\n"
  },
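  {
    "path": "docs/examples/version_check_example.c",
    "content": "/*\n * Illustrative sketch, not part of the original tree: the macros above pack\n * major/minor/micro into a single int as (a<<16 | b<<8 | c), so 52.66.100\n * becomes 52*65536 + 66*256 + 100 = 3424868, and version comparisons reduce\n * to plain integer comparisons. The file name and path are hypothetical.\n */\n\n#include <stdio.h>\n\n#include \"libavutil/version.h\"\n\n/* Compile-time gate in the style of the FF_API_* deprecation guards above. */\n#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(52, 0, 0)\n#  define HAVE_LAVU_52 1\n#else\n#  define HAVE_LAVU_52 0\n#endif\n\nint main(void)\n{\n    printf(\"libavutil %d.%d.%d = 0x%06X, >= 52.0.0: %d\\n\",\n           LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR,\n           LIBAVUTIL_VERSION_MICRO, (unsigned)LIBAVUTIL_VERSION_INT,\n           HAVE_LAVU_52);\n    return 0;\n}\n"
  },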
  {
    "path": "src/3rdparty/ffmpeg/include/libavutil/xtea.h",
    "content": "/*\n * A 32-bit implementation of the XTEA algorithm\n * Copyright (c) 2012 Samuel Pitoiset\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_XTEA_H\n#define AVUTIL_XTEA_H\n\n#include <stdint.h>\n\n/**\n * @file\n * @brief Public header for libavutil XTEA algorithm\n * @defgroup lavu_xtea XTEA\n * @ingroup lavu_crypto\n * @{\n */\n\ntypedef struct AVXTEA {\n    uint32_t key[16];\n} AVXTEA;\n\n/**\n * Initialize an AVXTEA context.\n *\n * @param ctx an AVXTEA context\n * @param key a key of 16 bytes used for encryption/decryption\n */\nvoid av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n *\n * @param ctx an AVXTEA context\n * @param dst destination array, can be equal to src\n * @param src source array, can be equal to dst\n * @param count number of 8 byte blocks\n * @param iv initialization vector for CBC mode, if NULL then ECB will be used\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src,\n                   int count, uint8_t *iv, int decrypt);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_XTEA_H */\n"
  },
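  {
    "path": "docs/examples/xtea_example.c",
    "content": "/*\n * Illustrative sketch, not part of the original tree: a minimal ECB-mode\n * round trip through the av_xtea_init()/av_xtea_crypt() API declared in\n * libavutil/xtea.h. The key and plaintext are arbitrary, and the file name\n * and path are hypothetical.\n */\n\n#include <stdio.h>\n#include <string.h>\n\n#include \"libavutil/xtea.h\"\n\nint main(void)\n{\n    static const uint8_t key[16] = \"0123456789abcdef\"; /* 16-byte key */\n    uint8_t plain[8] = \"ABCDEFGH\";                      /* one 8-byte block */\n    uint8_t enc[8], dec[8];\n    AVXTEA ctx;\n\n    av_xtea_init(&ctx, key);\n    av_xtea_crypt(&ctx, enc, plain, 1, NULL, 0); /* encrypt 1 block; iv == NULL selects ECB */\n    av_xtea_crypt(&ctx, dec, enc, 1, NULL, 1);   /* decrypt it back */\n\n    printf(\"round trip %s\\n\", memcmp(plain, dec, sizeof(plain)) == 0 ? \"ok\" : \"FAILED\");\n    return 0;\n}\n"
  },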
  {
    "path": "src/3rdparty/ffmpeg/include/libswresample/swresample.h",
    "content": "/*\n * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at)\n *\n * This file is part of libswresample\n *\n * libswresample is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * libswresample is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with libswresample; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWRESAMPLE_SWRESAMPLE_H\n#define SWRESAMPLE_SWRESAMPLE_H\n\n/**\n * @file\n * @ingroup lswr\n * libswresample public header\n */\n\n/**\n * @defgroup lswr Libswresample\n * @{\n *\n * Libswresample (lswr) is a library that handles audio resampling, sample\n * format conversion and mixing.\n *\n * Interaction with lswr is done through SwrContext, which is\n * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters\n * must be set with the @ref avoptions API.\n *\n * For example the following code will setup conversion from planar float sample\n * format to interleaved signed 16-bit integer, downsampling from 48kHz to\n * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing\n * matrix):\n * @code\n * SwrContext *swr = swr_alloc();\n * av_opt_set_channel_layout(swr, \"in_channel_layout\",  AV_CH_LAYOUT_5POINT1, 0);\n * av_opt_set_channel_layout(swr, \"out_channel_layout\", AV_CH_LAYOUT_STEREO,  0);\n * av_opt_set_int(swr, \"in_sample_rate\",     48000,                0);\n * av_opt_set_int(swr, \"out_sample_rate\",    44100,                0);\n * av_opt_set_sample_fmt(swr, \"in_sample_fmt\",  AV_SAMPLE_FMT_FLTP, 0);\n * av_opt_set_sample_fmt(swr, \"out_sample_fmt\", AV_SAMPLE_FMT_S16,  0);\n * @endcode\n *\n * Once all values have been set, it must be initialized with swr_init(). If\n * you need to change the conversion parameters, you can change the parameters\n * as described above, or by using swr_alloc_set_opts(), then call swr_init()\n * again.\n *\n * The conversion itself is done by repeatedly calling swr_convert().\n * Note that the samples may get buffered in swr if you provide insufficient\n * output space or if sample rate conversion is done, which requires \"future\"\n * samples. 
Samples that do not require future input can be retrieved at any\n * time by using swr_convert() (in_count can be set to 0).\n * At the end of conversion the resampling buffer can be flushed by calling\n * swr_convert() with NULL in and 0 in_count.\n *\n * The delay between input and output, can at any time be found by using\n * swr_get_delay().\n *\n * The following code demonstrates the conversion loop assuming the parameters\n * from above and caller-defined functions get_input() and handle_output():\n * @code\n * uint8_t **input;\n * int in_samples;\n *\n * while (get_input(&input, &in_samples)) {\n *     uint8_t *output;\n *     int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +\n *                                      in_samples, 44100, 48000, AV_ROUND_UP);\n *     av_samples_alloc(&output, NULL, 2, out_samples,\n *                      AV_SAMPLE_FMT_S16, 0);\n *     out_samples = swr_convert(swr, &output, out_samples,\n *                                      input, in_samples);\n *     handle_output(output, out_samples);\n *     av_freep(&output);\n * }\n * @endcode\n *\n * When the conversion is finished, the conversion\n * context and everything associated with it must be freed with swr_free().\n * There will be no memory leak if the data is not completely flushed before\n * swr_free().\n */\n\n#include <stdint.h>\n#include \"libavutil/samplefmt.h\"\n\n#include \"libswresample/version.h\"\n\n#if LIBSWRESAMPLE_VERSION_MAJOR < 1\n#define SWR_CH_MAX 32   ///< Maximum number of channels\n#endif\n\n#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate\n//TODO use int resample ?\n//long term TODO can we enable this dynamically?\n\nenum SwrDitherType {\n    SWR_DITHER_NONE = 0,\n    SWR_DITHER_RECTANGULAR,\n    SWR_DITHER_TRIANGULAR,\n    SWR_DITHER_TRIANGULAR_HIGHPASS,\n\n    SWR_DITHER_NS = 64,         ///< not part of API/ABI\n    SWR_DITHER_NS_LIPSHITZ,\n    SWR_DITHER_NS_F_WEIGHTED,\n    SWR_DITHER_NS_MODIFIED_E_WEIGHTED,\n    SWR_DITHER_NS_IMPROVED_E_WEIGHTED,\n    SWR_DITHER_NS_SHIBATA,\n    SWR_DITHER_NS_LOW_SHIBATA,\n    SWR_DITHER_NS_HIGH_SHIBATA,\n    SWR_DITHER_NB,              ///< not part of API/ABI\n};\n\n/** Resampling Engines */\nenum SwrEngine {\n    SWR_ENGINE_SWR,             /**< SW Resampler */\n    SWR_ENGINE_SOXR,            /**< SoX Resampler */\n    SWR_ENGINE_NB,              ///< not part of API/ABI\n};\n\n/** Resampling Filter Types */\nenum SwrFilterType {\n    SWR_FILTER_TYPE_CUBIC,              /**< Cubic */\n    SWR_FILTER_TYPE_BLACKMAN_NUTTALL,   /**< Blackman Nuttall Windowed Sinc */\n    SWR_FILTER_TYPE_KAISER,             /**< Kaiser Windowed Sinc */\n};\n\ntypedef struct SwrContext SwrContext;\n\n/**\n * Get the AVClass for swrContext. 
It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *swr_get_class(void);\n\n/**\n * Allocate SwrContext.\n *\n * If you use this function you will need to set the parameters (manually or\n * with swr_alloc_set_opts()) before calling swr_init().\n *\n * @see swr_alloc_set_opts(), swr_init(), swr_free()\n * @return NULL on error, allocated context otherwise\n */\nstruct SwrContext *swr_alloc(void);\n\n/**\n * Initialize context after user parameters have been set.\n *\n * @return AVERROR error code in case of failure.\n */\nint swr_init(struct SwrContext *s);\n\n/**\n * Check whether an swr context has been initialized or not.\n *\n * @return positive if it has been initialized, 0 if not initialized\n */\nint swr_is_initialized(struct SwrContext *s);\n\n/**\n * Allocate SwrContext if needed and set/reset common parameters.\n *\n * This function does not require s to be allocated with swr_alloc(). On the\n * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters\n * on the allocated context.\n *\n * @param s               Swr context, can be NULL\n * @param out_ch_layout   output channel layout (AV_CH_LAYOUT_*)\n * @param out_sample_fmt  output sample format (AV_SAMPLE_FMT_*).\n * @param out_sample_rate output sample rate (frequency in Hz)\n * @param in_ch_layout    input channel layout (AV_CH_LAYOUT_*)\n * @param in_sample_fmt   input sample format (AV_SAMPLE_FMT_*).\n * @param in_sample_rate  input sample rate (frequency in Hz)\n * @param log_offset      logging level offset\n * @param log_ctx         parent logging context, can be NULL\n *\n * @see swr_init(), swr_free()\n * @return NULL on error, allocated context otherwise\n */\nstruct SwrContext *swr_alloc_set_opts(struct SwrContext *s,\n                                      int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,\n                                      int64_t  in_ch_layout, enum AVSampleFormat  in_sample_fmt, int  in_sample_rate,\n                                      int log_offset, void *log_ctx);\n\n/**\n * Free the given SwrContext and set the pointer to NULL.\n */\nvoid swr_free(struct SwrContext **s);\n\n/**\n * Convert audio.\n *\n * in and in_count can be set to 0 to flush the last few samples out at the\n * end.\n *\n * If more input is provided than output space then the input will be buffered.\n * You can avoid this buffering by providing more output space than input.\n * Convertion will run directly without copying whenever possible.\n *\n * @param s         allocated Swr context, with parameters set\n * @param out       output buffers, only the first one need be set in case of packed audio\n * @param out_count amount of space available for output in samples per channel\n * @param in        input buffers, only the first one need to be set in case of packed audio\n * @param in_count  number of input samples available in one channel\n *\n * @return number of samples output per channel, negative value on error\n */\nint swr_convert(struct SwrContext *s, uint8_t **out, int out_count,\n                                const uint8_t **in , int in_count);\n\n/**\n * Convert the next timestamp from input to output\n * timestamps are in 1/(in_sample_rate * out_sample_rate) units.\n *\n * @note There are 2 slightly differently behaving modes.\n *       First is when automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)\n *              in this case timestamps will be passed 
through with delays compensated\n *       Second is when automatic timestamp compensation is used, (min_compensation < FLT_MAX)\n *              in this case the output timestamps will match output sample numbers\n *\n * @param pts   timestamp for the next input sample, INT64_MIN if unknown\n * @return the output timestamp for the next output sample\n */\nint64_t swr_next_pts(struct SwrContext *s, int64_t pts);\n\n/**\n * Activate resampling compensation.\n */\nint swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);\n\n/**\n * Set a customized input channel mapping.\n *\n * @param s           allocated Swr context, not yet initialized\n * @param channel_map customized input channel mapping (array of channel\n *                    indexes, -1 for a muted channel)\n * @return AVERROR error code in case of failure.\n */\nint swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);\n\n/**\n * Set a customized remix matrix.\n *\n * @param s       allocated Swr context, not yet initialized\n * @param matrix  remix coefficients; matrix[i + stride * o] is\n *                the weight of input channel i in output channel o\n * @param stride  offset between lines of the matrix\n * @return  AVERROR error code in case of failure.\n */\nint swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);\n\n/**\n * Drops the specified number of output samples.\n */\nint swr_drop_output(struct SwrContext *s, int count);\n\n/**\n * Injects the specified number of silence samples.\n */\nint swr_inject_silence(struct SwrContext *s, int count);\n\n/**\n * Gets the delay the next input sample will experience relative to the next output sample.\n *\n * Swresample can buffer data if more input has been provided than available\n * output space, also converting between sample rates needs a delay.\n * This function returns the sum of all such delays.\n * The exact delay is not necessarily an integer value in either input or\n * output sample rate. Especially when downsampling by a large value, the\n * output sample rate may be a poor choice to represent the delay, similarly\n * for upsampling and the input sample rate.\n *\n * @param s     swr context\n * @param base  timebase in which the returned delay will be\n *              if its set to 1 the returned delay is in seconds\n *              if its set to 1000 the returned delay is in milli seconds\n *              if its set to the input sample rate then the returned delay is in input samples\n *              if its set to the output sample rate then the returned delay is in output samples\n *              an exact rounding free delay can be found by using LCM(in_sample_rate, out_sample_rate)\n * @returns     the delay in 1/base units.\n */\nint64_t swr_get_delay(struct SwrContext *s, int64_t base);\n\n/**\n * Return the LIBSWRESAMPLE_VERSION_INT constant.\n */\nunsigned swresample_version(void);\n\n/**\n * Return the swr build-time configuration.\n */\nconst char *swresample_configuration(void);\n\n/**\n * Return the swr license.\n */\nconst char *swresample_license(void);\n\n/**\n * @}\n */\n\n#endif /* SWRESAMPLE_SWRESAMPLE_H */\n"
  },
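  {
    "path": "docs/examples/swresample_example.c",
    "content": "/*\n * Illustrative sketch, not part of the original tree: the alloc -> set\n * options -> init -> convert -> flush -> free cycle documented in\n * swresample.h, fed one frame of synthetic silence. 48kHz 5.1 planar float\n * in, 44.1kHz stereo interleaved s16 out, matching the header's own example.\n * The file name and path are hypothetical.\n */\n\n#include <stdio.h>\n\n#include \"libavutil/channel_layout.h\"\n#include \"libavutil/opt.h\"\n#include \"libswresample/swresample.h\"\n\nint main(void)\n{\n    SwrContext *swr = swr_alloc();\n    av_opt_set_channel_layout(swr, \"in_channel_layout\",  AV_CH_LAYOUT_5POINT1, 0);\n    av_opt_set_channel_layout(swr, \"out_channel_layout\", AV_CH_LAYOUT_STEREO,  0);\n    av_opt_set_int(swr, \"in_sample_rate\",  48000, 0);\n    av_opt_set_int(swr, \"out_sample_rate\", 44100, 0);\n    av_opt_set_sample_fmt(swr, \"in_sample_fmt\",  AV_SAMPLE_FMT_FLTP, 0);\n    av_opt_set_sample_fmt(swr, \"out_sample_fmt\", AV_SAMPLE_FMT_S16,  0);\n    if (swr_init(swr) < 0)\n        return 1;\n\n    /* One 1024-sample frame of silence: 6 planar float channels. */\n    static float ch[6][1024];\n    const uint8_t *in[6] = { (const uint8_t *)ch[0], (const uint8_t *)ch[1],\n                             (const uint8_t *)ch[2], (const uint8_t *)ch[3],\n                             (const uint8_t *)ch[4], (const uint8_t *)ch[5] };\n    static int16_t outbuf[2 * 2048];    /* generously sized interleaved output */\n    uint8_t *out[1] = { (uint8_t *)outbuf };\n\n    int got = swr_convert(swr, out, 2048, in, 1024);   /* ~941 samples out */\n    printf(\"converted %d samples/channel\\n\", got);\n\n    got = swr_convert(swr, out, 2048, NULL, 0);        /* flush buffered tail */\n    printf(\"flushed   %d samples/channel\\n\", got);\n\n    swr_free(&swr);\n    return 0;\n}\n"
  },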
  {
    "path": "src/3rdparty/ffmpeg/include/libswresample/version.h",
    "content": "/*\n * Version macros.\n *\n * This file is part of libswresample\n *\n * libswresample is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * libswresample is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with libswresample; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWR_VERSION_H\n#define SWR_VERSION_H\n\n/**\n * @file\n * Libswresample version macros\n */\n\n#include \"libavutil/avutil.h\"\n\n#define LIBSWRESAMPLE_VERSION_MAJOR 0\n#define LIBSWRESAMPLE_VERSION_MINOR 18\n#define LIBSWRESAMPLE_VERSION_MICRO 100\n\n#define LIBSWRESAMPLE_VERSION_INT  AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \\\n                                                  LIBSWRESAMPLE_VERSION_MINOR, \\\n                                                  LIBSWRESAMPLE_VERSION_MICRO)\n#define LIBSWRESAMPLE_VERSION      AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \\\n                                              LIBSWRESAMPLE_VERSION_MINOR, \\\n                                              LIBSWRESAMPLE_VERSION_MICRO)\n#define LIBSWRESAMPLE_BUILD        LIBSWRESAMPLE_VERSION_INT\n\n#define LIBSWRESAMPLE_IDENT        \"SwR\" AV_STRINGIFY(LIBSWRESAMPLE_VERSION)\n\n#endif /* SWR_VERSION_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/libswscale/swscale.h",
    "content": "/*\n * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWSCALE_SWSCALE_H\n#define SWSCALE_SWSCALE_H\n\n/**\n * @file\n * @ingroup libsws\n * external API header\n */\n\n#include <stdint.h>\n\n#include \"libavutil/avutil.h\"\n#include \"libavutil/log.h\"\n#include \"libavutil/pixfmt.h\"\n#include \"version.h\"\n\n/**\n * @defgroup libsws Color conversion and scaling\n * @{\n *\n * Return the LIBSWSCALE_VERSION_INT constant.\n */\nunsigned swscale_version(void);\n\n/**\n * Return the libswscale build-time configuration.\n */\nconst char *swscale_configuration(void);\n\n/**\n * Return the libswscale license.\n */\nconst char *swscale_license(void);\n\n/* values for the flags, the stuff on the command line is different */\n#define SWS_FAST_BILINEAR     1\n#define SWS_BILINEAR          2\n#define SWS_BICUBIC           4\n#define SWS_X                 8\n#define SWS_POINT          0x10\n#define SWS_AREA           0x20\n#define SWS_BICUBLIN       0x40\n#define SWS_GAUSS          0x80\n#define SWS_SINC          0x100\n#define SWS_LANCZOS       0x200\n#define SWS_SPLINE        0x400\n\n#define SWS_SRC_V_CHR_DROP_MASK     0x30000\n#define SWS_SRC_V_CHR_DROP_SHIFT    16\n\n#define SWS_PARAM_DEFAULT           123456\n\n#define SWS_PRINT_INFO              0x1000\n\n//the following 3 flags are not completely implemented\n//internal chrominace subsampling info\n#define SWS_FULL_CHR_H_INT    0x2000\n//input subsampling info\n#define SWS_FULL_CHR_H_INP    0x4000\n#define SWS_DIRECT_BGR        0x8000\n#define SWS_ACCURATE_RND      0x40000\n#define SWS_BITEXACT          0x80000\n#define SWS_ERROR_DIFFUSION  0x800000\n\n#if FF_API_SWS_CPU_CAPS\n/**\n * CPU caps are autodetected now, those flags\n * are only provided for API compatibility.\n */\n#define SWS_CPU_CAPS_MMX      0x80000000\n#define SWS_CPU_CAPS_MMXEXT   0x20000000\n#define SWS_CPU_CAPS_MMX2     0x20000000\n#define SWS_CPU_CAPS_3DNOW    0x40000000\n#define SWS_CPU_CAPS_ALTIVEC  0x10000000\n#define SWS_CPU_CAPS_BFIN     0x01000000\n#define SWS_CPU_CAPS_SSE2     0x02000000\n#endif\n\n#define SWS_MAX_REDUCE_CUTOFF 0.002\n\n#define SWS_CS_ITU709         1\n#define SWS_CS_FCC            4\n#define SWS_CS_ITU601         5\n#define SWS_CS_ITU624         5\n#define SWS_CS_SMPTE170M      5\n#define SWS_CS_SMPTE240M      7\n#define SWS_CS_DEFAULT        5\n\n/**\n * Return a pointer to yuv<->rgb coefficients for the given colorspace\n * suitable for sws_setColorspaceDetails().\n *\n * @param colorspace One of the SWS_CS_* macros. 
If invalid,\n * SWS_CS_DEFAULT is used.\n */\nconst int *sws_getCoefficients(int colorspace);\n\n// when used for filters they must have an odd number of elements\n// coeffs cannot be shared between vectors\ntypedef struct SwsVector {\n    double *coeff;              ///< pointer to the list of coefficients\n    int length;                 ///< number of coefficients in the vector\n} SwsVector;\n\n// vectors can be shared\ntypedef struct SwsFilter {\n    SwsVector *lumH;\n    SwsVector *lumV;\n    SwsVector *chrH;\n    SwsVector *chrV;\n} SwsFilter;\n\nstruct SwsContext;\n\n/**\n * Return a positive value if pix_fmt is a supported input format, 0\n * otherwise.\n */\nint sws_isSupportedInput(enum AVPixelFormat pix_fmt);\n\n/**\n * Return a positive value if pix_fmt is a supported output format, 0\n * otherwise.\n */\nint sws_isSupportedOutput(enum AVPixelFormat pix_fmt);\n\n/**\n * @param[in]  pix_fmt the pixel format\n * @return a positive value if an endianness conversion for pix_fmt is\n * supported, 0 otherwise.\n */\nint sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt);\n\n/**\n * Allocate an empty SwsContext. This must be filled and passed to\n * sws_init_context(). For filling see AVOptions, options.c and\n * sws_setColorspaceDetails().\n */\nstruct SwsContext *sws_alloc_context(void);\n\n/**\n * Initialize the swscaler context sws_context.\n *\n * @return zero or positive value on success, a negative value on\n * error\n */\nint sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);\n\n/**\n * Free the swscaler context swsContext.\n * If swsContext is NULL, then does nothing.\n */\nvoid sws_freeContext(struct SwsContext *swsContext);\n\n#if FF_API_SWS_GETCONTEXT\n/**\n * Allocate and return an SwsContext. You need it to perform\n * scaling/conversion operations using sws_scale().\n *\n * @param srcW the width of the source image\n * @param srcH the height of the source image\n * @param srcFormat the source image format\n * @param dstW the width of the destination image\n * @param dstH the height of the destination image\n * @param dstFormat the destination image format\n * @param flags specify which algorithm and options to use for rescaling\n * @return a pointer to an allocated context, or NULL in case of error\n * @note this function is to be removed after a saner alternative is\n *       written\n * @deprecated Use sws_getCachedContext() instead.\n */\nstruct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,\n                                  int dstW, int dstH, enum AVPixelFormat dstFormat,\n                                  int flags, SwsFilter *srcFilter,\n                                  SwsFilter *dstFilter, const double *param);\n#endif\n\n/**\n * Scale the image slice in srcSlice and put the resulting scaled\n * slice in the image in dst. A slice is a sequence of consecutive\n * rows in an image.\n *\n * Slices have to be provided in sequential order, either in\n * top-bottom or bottom-top order. 
If slices are provided in\n * non-sequential order the behavior of the function is undefined.\n *\n * @param c         the scaling context previously created with\n *                  sws_getContext()\n * @param srcSlice  the array containing the pointers to the planes of\n *                  the source slice\n * @param srcStride the array containing the strides for each plane of\n *                  the source image\n * @param srcSliceY the position in the source image of the slice to\n *                  process, that is the number (counted starting from\n *                  zero) in the image of the first row of the slice\n * @param srcSliceH the height of the source slice, that is the number\n *                  of rows in the slice\n * @param dst       the array containing the pointers to the planes of\n *                  the destination image\n * @param dstStride the array containing the strides for each plane of\n *                  the destination image\n * @return          the height of the output slice\n */\nint sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],\n              const int srcStride[], int srcSliceY, int srcSliceH,\n              uint8_t *const dst[], const int dstStride[]);\n\n/**\n * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg)\n * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg)\n * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x]\n * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x]\n * @param brightness 16.16 fixed point brightness correction\n * @param contrast 16.16 fixed point contrast correction\n * @param saturation 16.16 fixed point saturation correction\n * @return -1 if not supported\n */\nint sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],\n                             int srcRange, const int table[4], int dstRange,\n                             int brightness, int contrast, int saturation);\n\n/**\n * @return -1 if not supported\n */\nint sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,\n                             int *srcRange, int **table, int *dstRange,\n                             int *brightness, int *contrast, int *saturation);\n\n/**\n * Allocate and return an uninitialized vector with length coefficients.\n */\nSwsVector *sws_allocVec(int length);\n\n/**\n * Return a normalized Gaussian curve used to filter stuff\n * quality = 3 is high quality, lower is lower quality.\n */\nSwsVector *sws_getGaussianVec(double variance, double quality);\n\n/**\n * Allocate and return a vector with length coefficients, all\n * with the same value c.\n */\nSwsVector *sws_getConstVec(double c, int length);\n\n/**\n * Allocate and return a vector with just one coefficient, with\n * value 1.0.\n */\nSwsVector *sws_getIdentityVec(void);\n\n/**\n * Scale all the coefficients of a by the scalar value.\n */\nvoid sws_scaleVec(SwsVector *a, double scalar);\n\n/**\n * Scale all the coefficients of a so that their sum equals height.\n */\nvoid sws_normalizeVec(SwsVector *a, double height);\nvoid sws_convVec(SwsVector *a, SwsVector *b);\nvoid sws_addVec(SwsVector *a, SwsVector *b);\nvoid sws_subVec(SwsVector *a, SwsVector *b);\nvoid sws_shiftVec(SwsVector *a, int shift);\n\n/**\n * Allocate and return a clone of the vector a, that is a vector\n * with the same coefficients as a.\n */\nSwsVector *sws_cloneVec(SwsVector *a);\n\n/**\n * 
Print with av_log() a textual representation of the vector a\n * if log_level <= av_log_level.\n */\nvoid sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);\n\nvoid sws_freeVec(SwsVector *a);\n\nSwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,\n                                float lumaSharpen, float chromaSharpen,\n                                float chromaHShift, float chromaVShift,\n                                int verbose);\nvoid sws_freeFilter(SwsFilter *filter);\n\n/**\n * Check if context can be reused, otherwise reallocate a new one.\n *\n * If context is NULL, just calls sws_getContext() to get a new\n * context. Otherwise, checks if the parameters are the ones already\n * saved in context. If that is the case, returns the current\n * context. Otherwise, frees context and gets a new context with\n * the new parameters.\n *\n * Be warned that srcFilter and dstFilter are not checked, they\n * are assumed to remain the same.\n */\nstruct SwsContext *sws_getCachedContext(struct SwsContext *context,\n                                        int srcW, int srcH, enum AVPixelFormat srcFormat,\n                                        int dstW, int dstH, enum AVPixelFormat dstFormat,\n                                        int flags, SwsFilter *srcFilter,\n                                        SwsFilter *dstFilter, const double *param);\n\n/**\n * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits.\n *\n * The output frame will have the same packed format as the palette.\n *\n * @param src        source frame buffer\n * @param dst        destination frame buffer\n * @param num_pixels number of pixels to convert\n * @param palette    array with [256] entries, which must match color arrangement (RGB or BGR) of src\n */\nvoid sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);\n\n/**\n * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits.\n *\n * With the palette format \"ABCD\", the destination frame ends up with the format \"ABC\".\n *\n * @param src        source frame buffer\n * @param dst        destination frame buffer\n * @param num_pixels number of pixels to convert\n * @param palette    array with [256] entries, which must match color arrangement (RGB or BGR) of src\n */\nvoid sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);\n\n/**\n * Get the AVClass for swsContext. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *sws_get_class(void);\n\n/**\n * @}\n */\n\n#endif /* SWSCALE_SWSCALE_H */\n"
  },
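  {
    "path": "docs/examples/swscale_example.c",
    "content": "/*\n * Illustrative sketch, not part of the original tree: converts one synthetic\n * YUV420P frame to RGB24 through the sws_getCachedContext()/sws_scale() API\n * declared in swscale.h. Strides are hand-computed for tightly packed 64x48\n * planes. The file name and path are hypothetical.\n */\n\n#include <stdio.h>\n\n#include \"libswscale/swscale.h\"\n\n#define W 64\n#define H 48\n\nint main(void)\n{\n    /* Tightly packed YUV420P input: full-res luma, quarter-res chroma. */\n    static uint8_t y[W * H], u[(W / 2) * (H / 2)], v[(W / 2) * (H / 2)];\n    static uint8_t rgb[W * H * 3];      /* packed RGB24 output */\n\n    const uint8_t *src[4] = { y, u, v, NULL };\n    int src_stride[4]     = { W, W / 2, W / 2, 0 };\n    uint8_t *dst[4]       = { rgb, NULL, NULL, NULL };\n    int dst_stride[4]     = { W * 3, 0, 0, 0 };\n\n    struct SwsContext *sws =\n        sws_getCachedContext(NULL, W, H, AV_PIX_FMT_YUV420P,\n                             W, H, AV_PIX_FMT_RGB24,\n                             SWS_BILINEAR, NULL, NULL, NULL);\n    if (!sws)\n        return 1;\n\n    /* The whole frame is passed as a single slice starting at row 0. */\n    int out_h = sws_scale(sws, src, src_stride, 0, H, dst, dst_stride);\n    printf(\"scaled %d rows\\n\", out_h);  /* expect 48 */\n\n    sws_freeContext(sws);\n    return 0;\n}\n"
  },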
  {
    "path": "src/3rdparty/ffmpeg/include/libswscale/version.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWSCALE_VERSION_H\n#define SWSCALE_VERSION_H\n\n/**\n * @file\n * swscale version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBSWSCALE_VERSION_MAJOR 2\n#define LIBSWSCALE_VERSION_MINOR 5\n#define LIBSWSCALE_VERSION_MICRO 102\n\n#define LIBSWSCALE_VERSION_INT  AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \\\n                                               LIBSWSCALE_VERSION_MINOR, \\\n                                               LIBSWSCALE_VERSION_MICRO)\n#define LIBSWSCALE_VERSION      AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \\\n                                           LIBSWSCALE_VERSION_MINOR, \\\n                                           LIBSWSCALE_VERSION_MICRO)\n#define LIBSWSCALE_BUILD        LIBSWSCALE_VERSION_INT\n\n#define LIBSWSCALE_IDENT        \"SwS\" AV_STRINGIFY(LIBSWSCALE_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#ifndef FF_API_SWS_GETCONTEXT\n#define FF_API_SWS_GETCONTEXT  (LIBSWSCALE_VERSION_MAJOR < 3)\n#endif\n#ifndef FF_API_SWS_CPU_CAPS\n#define FF_API_SWS_CPU_CAPS    (LIBSWSCALE_VERSION_MAJOR < 3)\n#endif\n#ifndef FF_API_SWS_FORMAT_NAME\n#define FF_API_SWS_FORMAT_NAME  (LIBSWSCALE_VERSION_MAJOR < 3)\n#endif\n\n#endif /* SWSCALE_VERSION_H */\n"
  },
  {
    "path": "src/3rdparty/ffmpeg/include/stdint.h",
    "content": "// ISO C9x  compliant stdint.h for Microsoft Visual Studio\n// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 \n// \n//  Copyright (c) 2006-2008 Alexander Chemeris\n// \n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n// \n//   1. Redistributions of source code must retain the above copyright notice,\n//      this list of conditions and the following disclaimer.\n// \n//   2. Redistributions in binary form must reproduce the above copyright\n//      notice, this list of conditions and the following disclaimer in the\n//      documentation and/or other materials provided with the distribution.\n// \n//   3. The name of the author may be used to endorse or promote products\n//      derived from this software without specific prior written permission.\n// \n// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED\n// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\n// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n// \n///////////////////////////////////////////////////////////////////////////////\n\n#ifndef _MSC_VER // [\n#error \"Use this header only with Microsoft Visual C++ compilers!\"\n#endif // _MSC_VER ]\n\n#ifndef _MSC_STDINT_H_ // [\n#define _MSC_STDINT_H_\n\n#if _MSC_VER > 1000\n#pragma once\n#endif\n\n#include <limits.h>\n\n// For Visual Studio 6 in C++ mode and for many Visual Studio versions when\n// compiling for ARM we should wrap <wchar.h> include with 'extern \"C++\" {}'\n// or compiler give many errors like this:\n//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n#  include <wchar.h>\n#ifdef __cplusplus\n}\n#endif\n\n// Define _W64 macros to mark types changing their size, like intptr_t.\n#ifndef _W64\n#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300\n#     define _W64 __w64\n#  else\n#     define _W64\n#  endif\n#endif\n\n\n// 7.18.1 Integer types\n\n// 7.18.1.1 Exact-width integer types\n\n// Visual Studio 6 and Embedded Visual C++ 4 doesn't\n// realize that, e.g. 
char has the same size as __int8\n// so we give up on __intX for them.\n#if (_MSC_VER < 1300)\n   typedef signed char       int8_t;\n   typedef signed short      int16_t;\n   typedef signed int        int32_t;\n   typedef unsigned char     uint8_t;\n   typedef unsigned short    uint16_t;\n   typedef unsigned int      uint32_t;\n#else\n   typedef signed __int8     int8_t;\n   typedef signed __int16    int16_t;\n   typedef signed __int32    int32_t;\n   typedef unsigned __int8   uint8_t;\n   typedef unsigned __int16  uint16_t;\n   typedef unsigned __int32  uint32_t;\n#endif\ntypedef signed __int64       int64_t;\ntypedef unsigned __int64     uint64_t;\n\n\n// 7.18.1.2 Minimum-width integer types\ntypedef int8_t    int_least8_t;\ntypedef int16_t   int_least16_t;\ntypedef int32_t   int_least32_t;\ntypedef int64_t   int_least64_t;\ntypedef uint8_t   uint_least8_t;\ntypedef uint16_t  uint_least16_t;\ntypedef uint32_t  uint_least32_t;\ntypedef uint64_t  uint_least64_t;\n\n// 7.18.1.3 Fastest minimum-width integer types\ntypedef int8_t    int_fast8_t;\ntypedef int16_t   int_fast16_t;\ntypedef int32_t   int_fast32_t;\ntypedef int64_t   int_fast64_t;\ntypedef uint8_t   uint_fast8_t;\ntypedef uint16_t  uint_fast16_t;\ntypedef uint32_t  uint_fast32_t;\ntypedef uint64_t  uint_fast64_t;\n\n// 7.18.1.4 Integer types capable of holding object pointers\n#ifdef _WIN64 // [\n   typedef signed __int64    intptr_t;\n   typedef unsigned __int64  uintptr_t;\n#else // _WIN64 ][\n   typedef _W64 signed int   intptr_t;\n   typedef _W64 unsigned int uintptr_t;\n#endif // _WIN64 ]\n\n// 7.18.1.5 Greatest-width integer types\ntypedef int64_t   intmax_t;\ntypedef uint64_t  uintmax_t;\n\n\n// 7.18.2 Limits of specified-width integer types\n\n#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [   See footnote 220 at page 257 and footnote 221 at page 259\n\n// 7.18.2.1 Limits of exact-width integer types\n#define INT8_MIN     ((int8_t)_I8_MIN)\n#define INT8_MAX     _I8_MAX\n#define INT16_MIN    ((int16_t)_I16_MIN)\n#define INT16_MAX    _I16_MAX\n#define INT32_MIN    ((int32_t)_I32_MIN)\n#define INT32_MAX    _I32_MAX\n#define INT64_MIN    ((int64_t)_I64_MIN)\n#define INT64_MAX    _I64_MAX\n#define UINT8_MAX    _UI8_MAX\n#define UINT16_MAX   _UI16_MAX\n#define UINT32_MAX   _UI32_MAX\n#define UINT64_MAX   _UI64_MAX\n\n// 7.18.2.2 Limits of minimum-width integer types\n#define INT_LEAST8_MIN    INT8_MIN\n#define INT_LEAST8_MAX    INT8_MAX\n#define INT_LEAST16_MIN   INT16_MIN\n#define INT_LEAST16_MAX   INT16_MAX\n#define INT_LEAST32_MIN   INT32_MIN\n#define INT_LEAST32_MAX   INT32_MAX\n#define INT_LEAST64_MIN   INT64_MIN\n#define INT_LEAST64_MAX   INT64_MAX\n#define UINT_LEAST8_MAX   UINT8_MAX\n#define UINT_LEAST16_MAX  UINT16_MAX\n#define UINT_LEAST32_MAX  UINT32_MAX\n#define UINT_LEAST64_MAX  UINT64_MAX\n\n// 7.18.2.3 Limits of fastest minimum-width integer types\n#define INT_FAST8_MIN    INT8_MIN\n#define INT_FAST8_MAX    INT8_MAX\n#define INT_FAST16_MIN   INT16_MIN\n#define INT_FAST16_MAX   INT16_MAX\n#define INT_FAST32_MIN   INT32_MIN\n#define INT_FAST32_MAX   INT32_MAX\n#define INT_FAST64_MIN   INT64_MIN\n#define INT_FAST64_MAX   INT64_MAX\n#define UINT_FAST8_MAX   UINT8_MAX\n#define UINT_FAST16_MAX  UINT16_MAX\n#define UINT_FAST32_MAX  UINT32_MAX\n#define UINT_FAST64_MAX  UINT64_MAX\n\n// 7.18.2.4 Limits of integer types capable of holding object pointers\n#ifdef _WIN64 // [\n#  define INTPTR_MIN   INT64_MIN\n#  define INTPTR_MAX   INT64_MAX\n#  define UINTPTR_MAX  UINT64_MAX\n#else // _WIN64 ][\n#  define 
INTPTR_MIN   INT32_MIN\n#  define INTPTR_MAX   INT32_MAX\n#  define UINTPTR_MAX  UINT32_MAX\n#endif // _WIN64 ]\n\n// 7.18.2.5 Limits of greatest-width integer types\n#define INTMAX_MIN   INT64_MIN\n#define INTMAX_MAX   INT64_MAX\n#define UINTMAX_MAX  UINT64_MAX\n\n// 7.18.3 Limits of other integer types\n\n#ifdef _WIN64 // [\n#  define PTRDIFF_MIN  _I64_MIN\n#  define PTRDIFF_MAX  _I64_MAX\n#else  // _WIN64 ][\n#  define PTRDIFF_MIN  _I32_MIN\n#  define PTRDIFF_MAX  _I32_MAX\n#endif  // _WIN64 ]\n\n#define SIG_ATOMIC_MIN  INT_MIN\n#define SIG_ATOMIC_MAX  INT_MAX\n\n#ifndef SIZE_MAX // [\n#  ifdef _WIN64 // [\n#     define SIZE_MAX  _UI64_MAX\n#  else // _WIN64 ][\n#     define SIZE_MAX  _UI32_MAX\n#  endif // _WIN64 ]\n#endif // SIZE_MAX ]\n\n// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>\n#ifndef WCHAR_MIN // [\n#  define WCHAR_MIN  0\n#endif  // WCHAR_MIN ]\n#ifndef WCHAR_MAX // [\n#  define WCHAR_MAX  _UI16_MAX\n#endif  // WCHAR_MAX ]\n\n#define WINT_MIN  0\n#define WINT_MAX  _UI16_MAX\n\n#endif // __STDC_LIMIT_MACROS ]\n\n\n// 7.18.4 Limits of other integer types\n\n#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [   See footnote 224 at page 260\n\n// 7.18.4.1 Macros for minimum-width integer constants\n\n#define INT8_C(val)  val##i8\n#define INT16_C(val) val##i16\n#define INT32_C(val) val##i32\n#define INT64_C(val) val##i64\n\n#define UINT8_C(val)  val##ui8\n#define UINT16_C(val) val##ui16\n#define UINT32_C(val) val##ui32\n#define UINT64_C(val) val##ui64\n\n// 7.18.4.2 Macros for greatest-width integer constants\n#define INTMAX_C   INT64_C\n#define UINTMAX_C  UINT64_C\n\n#endif // __STDC_CONSTANT_MACROS ]\n\n\n#endif // _MSC_STDINT_H_ ]\n"
  },
  {
    "path": "src/3rdparty/glut/include/GL/glut.h",
    "content": "#ifndef __glut_h__\n#define __glut_h__\n\n/* Copyright (c) Mark J. Kilgard, 1994, 1995, 1996, 1998. */\n\n/* This program is freely distributable without licensing fees  and is\n   provided without guarantee or warrantee expressed or  implied. This\n   program is -not- in the public domain. */\n\n#if defined(_WIN32)\n\n/* GLUT 3.7 now tries to avoid including <windows.h>\n   to avoid name space pollution, but Win32's <GL/gl.h> \n   needs APIENTRY and WINGDIAPI defined properly. */\n# if 0\n   /* This would put tons of macros and crap in our clean name space. */\n#  define  WIN32_LEAN_AND_MEAN\n#  include <windows.h>\n# else\n   /* XXX This is from Win32's <windef.h> */\n#  ifndef APIENTRY\n#   define GLUT_APIENTRY_DEFINED\n#   if (_MSC_VER >= 800) || defined(_STDCALL_SUPPORTED) || defined(__BORLANDC__) || defined(__LCC__)\n#    define APIENTRY    __stdcall\n#   else\n#    define APIENTRY\n#   endif\n#  endif\n   /* XXX This is from Win32's <winnt.h> */\n#  ifndef CALLBACK\n#   if (defined(_M_MRX000) || defined(_M_IX86) || defined(_M_ALPHA) || defined(_M_PPC)) && !defined(MIDL_PASS) || defined(__LCC__)\n#    define CALLBACK __stdcall\n#   else\n#    define CALLBACK\n#   endif\n#  endif\n   /* XXX Hack for lcc compiler.  It doesn't support __declspec(dllimport), just __stdcall. */\n#  if defined( __LCC__ )\n#   undef WINGDIAPI\n#   define WINGDIAPI __stdcall\n#  else\n   /* XXX This is from Win32's <wingdi.h> and <winnt.h> */\n#   ifndef WINGDIAPI\n#    define GLUT_WINGDIAPI_DEFINED\n#    define WINGDIAPI __declspec(dllimport)\n#   endif\n#  endif\n   /* XXX This is from Win32's <ctype.h> */\n#  ifndef _WCHAR_T_DEFINED\ntypedef unsigned short wchar_t;\n#   define _WCHAR_T_DEFINED\n#  endif\n# endif\n\n/* To disable automatic library usage for GLUT, define GLUT_NO_LIB_PRAGMA\n   in your compile preprocessor options. */\n# if !defined(GLUT_BUILDING_LIB) && !defined(GLUT_NO_LIB_PRAGMA)\n#  pragma comment (lib, \"winmm.lib\")      /* link with Windows MultiMedia lib */\n/* To enable automatic SGI OpenGL for Windows library usage for GLUT,\n   define GLUT_USE_SGI_OPENGL in your compile preprocessor options.  */\n#  ifdef GLUT_USE_SGI_OPENGL\n#   pragma comment (lib, \"opengl.lib\")    /* link with SGI OpenGL for Windows lib */\n#   pragma comment (lib, \"glu.lib\")       /* link with SGI OpenGL Utility lib */\n#   pragma comment (lib, \"glut.lib\")      /* link with Win32 GLUT for SGI OpenGL lib */\n#  else\n#   pragma comment (lib, \"opengl32.lib\")  /* link with Microsoft OpenGL lib */\n#   pragma comment (lib, \"glu32.lib\")     /* link with Microsoft OpenGL Utility lib */\n#   pragma comment (lib, \"glut32.lib\")    /* link with Win32 GLUT lib */\n#  endif\n# endif\n\n/* To disable supression of annoying warnings about floats being promoted\n   to doubles, define GLUT_NO_WARNING_DISABLE in your compile preprocessor\n   options. */\n# ifndef GLUT_NO_WARNING_DISABLE\n#  pragma warning (disable:4244)  /* Disable bogus VC++ 4.2 conversion warnings. */\n#  pragma warning (disable:4305)  /* VC++ 5.0 version of above warning. */\n# endif\n\n/* Win32 has an annoying issue where there are multiple C run-time\n   libraries (CRTs).  If the executable is linked with a different CRT\n   from the GLUT DLL, the GLUT DLL will not share the same CRT static\n   data seen by the executable.  In particular, atexit callbacks registered\n   in the executable will not be called if GLUT calls its (different)\n   exit routine).  
GLUT is typically built with the\n   \"/MD\" option (the CRT with multithreading DLL support), but the Visual\n   C++ linker default is \"/ML\" (the single threaded CRT).\n\n   One workaround to this issue is requiring users to always link with\n   the same CRT as GLUT is compiled with.  That requires users supply a\n   non-standard option.  GLUT 3.7 has its own built-in workaround where\n   the executable's \"exit\" function pointer is covertly passed to GLUT.\n   GLUT then calls the executable's exit function pointer to ensure that\n   any \"atexit\" calls registered by the application are called if GLUT\n   needs to exit.\n\n   Note that the __glut*WithExit routines should NEVER be called directly.\n   To avoid the atexit workaround, #define GLUT_DISABLE_ATEXIT_HACK. */\n\n/* XXX This is from Win32's <process.h> */\n# if !defined(_MSC_VER) && !defined(__cdecl)\n   /* Define __cdecl for non-Microsoft compilers. */\n#  define __cdecl\n#  define GLUT_DEFINED___CDECL\n# endif\n# ifndef _CRTIMP\n#  ifdef _NTSDK\n    /* Definition compatible with NT SDK */\n#   define _CRTIMP\n#  else\n    /* Current definition */\n#   ifdef _DLL\n#    define _CRTIMP __declspec(dllimport)\n#   else\n#    define _CRTIMP\n#   endif\n#  endif\n#  define GLUT_DEFINED__CRTIMP\n# endif\n\n/* GLUT API entry point declarations for Win32. */\n# ifdef GLUT_BUILDING_LIB\n#  define GLUTAPI __declspec(dllexport)\n# else\n#  ifdef _DLL\n#   define GLUTAPI __declspec(dllimport)\n#  else\n#   define GLUTAPI extern\n#  endif\n# endif\n\n/* GLUT callback calling convention for Win32. */\n# define GLUTCALLBACK __cdecl\n\n#endif  /* _WIN32 */\n\n#include <GL/gl.h>\n#include <GL/glu.h>\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#if defined(_WIN32)\n# ifndef GLUT_BUILDING_LIB\nextern _CRTIMP void __cdecl exit(int);\n# endif\n#else\n/* non-Win32 case. */\n/* Define APIENTRY and CALLBACK to nothing if we aren't on Win32. */\n# define APIENTRY\n# define GLUT_APIENTRY_DEFINED\n# define CALLBACK\n/* Define GLUTAPI and GLUTCALLBACK as below if we aren't on Win32. */\n# define GLUTAPI extern\n# define GLUTCALLBACK\n/* Prototype exit for the non-Win32 case (see above). */\nextern void exit(int);\n#endif\n\n/**\n GLUT API revision history:\n \n GLUT_API_VERSION is updated to reflect incompatible GLUT\n API changes (interface changes, semantic changes, deletions,\n or additions).\n \n GLUT_API_VERSION=1  First public release of GLUT.  11/29/94\n\n GLUT_API_VERSION=2  Added support for OpenGL/GLX multisampling,\n extension.  Supports new input devices like tablet, dial and button\n box, and Spaceball.  Easy to query OpenGL extensions.\n\n GLUT_API_VERSION=3  glutMenuStatus added.\n\n GLUT_API_VERSION=4  glutInitDisplayString, glutWarpPointer,\n glutBitmapLength, glutStrokeLength, glutWindowStatusFunc, dynamic\n video resize subAPI, glutPostWindowRedisplay, glutKeyboardUpFunc,\n glutSpecialUpFunc, glutIgnoreKeyRepeat, glutSetKeyRepeat,\n glutJoystickFunc, glutForceJoystickFunc (NOT FINALIZED!).\n**/\n#ifndef GLUT_API_VERSION  /* allow this to be overriden */\n#define GLUT_API_VERSION\t\t3\n#endif\n\n/**\n GLUT implementation revision history:\n \n GLUT_XLIB_IMPLEMENTATION is updated to reflect both GLUT\n API revisions and implementation revisions (ie, bug fixes).\n\n GLUT_XLIB_IMPLEMENTATION=1  mjk's first public release of\n GLUT Xlib-based implementation.  
11/29/94\n\n GLUT_XLIB_IMPLEMENTATION=2  mjk's second public release of\n GLUT Xlib-based implementation providing GLUT version 2 \n interfaces.\n\n GLUT_XLIB_IMPLEMENTATION=3  mjk's GLUT 2.2 images. 4/17/95\n\n GLUT_XLIB_IMPLEMENTATION=4  mjk's GLUT 2.3 images. 6/?/95\n\n GLUT_XLIB_IMPLEMENTATION=5  mjk's GLUT 3.0 images. 10/?/95\n\n GLUT_XLIB_IMPLEMENTATION=7  mjk's GLUT 3.1+ with glutWarpPoitner.  7/24/96\n\n GLUT_XLIB_IMPLEMENTATION=8  mjk's GLUT 3.1+ with glutWarpPoitner\n and video resize.  1/3/97\n\n GLUT_XLIB_IMPLEMENTATION=9 mjk's GLUT 3.4 release with early GLUT 4 routines.\n\n GLUT_XLIB_IMPLEMENTATION=11 Mesa 2.5's GLUT 3.6 release.\n\n GLUT_XLIB_IMPLEMENTATION=12 mjk's GLUT 3.6 release with early GLUT 4 routines + signal handling.\n\n GLUT_XLIB_IMPLEMENTATION=13 mjk's GLUT 3.7 beta with GameGLUT support.\n\n GLUT_XLIB_IMPLEMENTATION=14 mjk's GLUT 3.7 beta with f90gl friend interface.\n\n GLUT_XLIB_IMPLEMENTATION=15 mjk's GLUT 3.7 beta sync'ed with Mesa <GL/glut.h>\n**/\n#ifndef GLUT_XLIB_IMPLEMENTATION  /* Allow this to be overriden. */\n#define GLUT_XLIB_IMPLEMENTATION\t15\n#endif\n\n/* Display mode bit masks. */\n#define GLUT_RGB\t\t\t0\n#define GLUT_RGBA\t\t\tGLUT_RGB\n#define GLUT_INDEX\t\t\t1\n#define GLUT_SINGLE\t\t\t0\n#define GLUT_DOUBLE\t\t\t2\n#define GLUT_ACCUM\t\t\t4\n#define GLUT_ALPHA\t\t\t8\n#define GLUT_DEPTH\t\t\t16\n#define GLUT_STENCIL\t\t\t32\n#if (GLUT_API_VERSION >= 2)\n#define GLUT_MULTISAMPLE\t\t128\n#define GLUT_STEREO\t\t\t256\n#endif\n#if (GLUT_API_VERSION >= 3)\n#define GLUT_LUMINANCE\t\t\t512\n#endif\n\n/* Mouse buttons. */\n#define GLUT_LEFT_BUTTON\t\t0\n#define GLUT_MIDDLE_BUTTON\t\t1\n#define GLUT_RIGHT_BUTTON\t\t2\n\n/* Mouse button  state. */\n#define GLUT_DOWN\t\t\t0\n#define GLUT_UP\t\t\t\t1\n\n#if (GLUT_API_VERSION >= 2)\n/* function keys */\n#define GLUT_KEY_F1\t\t\t1\n#define GLUT_KEY_F2\t\t\t2\n#define GLUT_KEY_F3\t\t\t3\n#define GLUT_KEY_F4\t\t\t4\n#define GLUT_KEY_F5\t\t\t5\n#define GLUT_KEY_F6\t\t\t6\n#define GLUT_KEY_F7\t\t\t7\n#define GLUT_KEY_F8\t\t\t8\n#define GLUT_KEY_F9\t\t\t9\n#define GLUT_KEY_F10\t\t\t10\n#define GLUT_KEY_F11\t\t\t11\n#define GLUT_KEY_F12\t\t\t12\n/* directional keys */\n#define GLUT_KEY_LEFT\t\t\t100\n#define GLUT_KEY_UP\t\t\t101\n#define GLUT_KEY_RIGHT\t\t\t102\n#define GLUT_KEY_DOWN\t\t\t103\n#define GLUT_KEY_PAGE_UP\t\t104\n#define GLUT_KEY_PAGE_DOWN\t\t105\n#define GLUT_KEY_HOME\t\t\t106\n#define GLUT_KEY_END\t\t\t107\n#define GLUT_KEY_INSERT\t\t\t108\n#endif\n\n/* Entry/exit  state. */\n#define GLUT_LEFT\t\t\t0\n#define GLUT_ENTERED\t\t\t1\n\n/* Menu usage  state. */\n#define GLUT_MENU_NOT_IN_USE\t\t0\n#define GLUT_MENU_IN_USE\t\t1\n\n/* Visibility  state. */\n#define GLUT_NOT_VISIBLE\t\t0\n#define GLUT_VISIBLE\t\t\t1\n\n/* Window status  state. */\n#define GLUT_HIDDEN\t\t\t0\n#define GLUT_FULLY_RETAINED\t\t1\n#define GLUT_PARTIALLY_RETAINED\t\t2\n#define GLUT_FULLY_COVERED\t\t3\n\n/* Color index component selection values. */\n#define GLUT_RED\t\t\t0\n#define GLUT_GREEN\t\t\t1\n#define GLUT_BLUE\t\t\t2\n\n#if defined(_WIN32)\n/* Stroke font constants (use these in GLUT program). */\n#define GLUT_STROKE_ROMAN\t\t((void*)0)\n#define GLUT_STROKE_MONO_ROMAN\t\t((void*)1)\n\n/* Bitmap font constants (use these in GLUT program). 
*/\n#define GLUT_BITMAP_9_BY_15\t\t((void*)2)\n#define GLUT_BITMAP_8_BY_13\t\t((void*)3)\n#define GLUT_BITMAP_TIMES_ROMAN_10\t((void*)4)\n#define GLUT_BITMAP_TIMES_ROMAN_24\t((void*)5)\n#if (GLUT_API_VERSION >= 3)\n#define GLUT_BITMAP_HELVETICA_10\t((void*)6)\n#define GLUT_BITMAP_HELVETICA_12\t((void*)7)\n#define GLUT_BITMAP_HELVETICA_18\t((void*)8)\n#endif\n#else\n/* Stroke font opaque addresses (use constants instead in source code). */\nGLUTAPI void *glutStrokeRoman;\nGLUTAPI void *glutStrokeMonoRoman;\n\n/* Stroke font constants (use these in GLUT program). */\n#define GLUT_STROKE_ROMAN\t\t(&glutStrokeRoman)\n#define GLUT_STROKE_MONO_ROMAN\t\t(&glutStrokeMonoRoman)\n\n/* Bitmap font opaque addresses (use constants instead in source code). */\nGLUTAPI void *glutBitmap9By15;\nGLUTAPI void *glutBitmap8By13;\nGLUTAPI void *glutBitmapTimesRoman10;\nGLUTAPI void *glutBitmapTimesRoman24;\nGLUTAPI void *glutBitmapHelvetica10;\nGLUTAPI void *glutBitmapHelvetica12;\nGLUTAPI void *glutBitmapHelvetica18;\n\n/* Bitmap font constants (use these in GLUT program). */\n#define GLUT_BITMAP_9_BY_15\t\t(&glutBitmap9By15)\n#define GLUT_BITMAP_8_BY_13\t\t(&glutBitmap8By13)\n#define GLUT_BITMAP_TIMES_ROMAN_10\t(&glutBitmapTimesRoman10)\n#define GLUT_BITMAP_TIMES_ROMAN_24\t(&glutBitmapTimesRoman24)\n#if (GLUT_API_VERSION >= 3)\n#define GLUT_BITMAP_HELVETICA_10\t(&glutBitmapHelvetica10)\n#define GLUT_BITMAP_HELVETICA_12\t(&glutBitmapHelvetica12)\n#define GLUT_BITMAP_HELVETICA_18\t(&glutBitmapHelvetica18)\n#endif\n#endif\n\n/* glutGet parameters. */\n#define GLUT_WINDOW_X\t\t\t((GLenum) 100)\n#define GLUT_WINDOW_Y\t\t\t((GLenum) 101)\n#define GLUT_WINDOW_WIDTH\t\t((GLenum) 102)\n#define GLUT_WINDOW_HEIGHT\t\t((GLenum) 103)\n#define GLUT_WINDOW_BUFFER_SIZE\t\t((GLenum) 104)\n#define GLUT_WINDOW_STENCIL_SIZE\t((GLenum) 105)\n#define GLUT_WINDOW_DEPTH_SIZE\t\t((GLenum) 106)\n#define GLUT_WINDOW_RED_SIZE\t\t((GLenum) 107)\n#define GLUT_WINDOW_GREEN_SIZE\t\t((GLenum) 108)\n#define GLUT_WINDOW_BLUE_SIZE\t\t((GLenum) 109)\n#define GLUT_WINDOW_ALPHA_SIZE\t\t((GLenum) 110)\n#define GLUT_WINDOW_ACCUM_RED_SIZE\t((GLenum) 111)\n#define GLUT_WINDOW_ACCUM_GREEN_SIZE\t((GLenum) 112)\n#define GLUT_WINDOW_ACCUM_BLUE_SIZE\t((GLenum) 113)\n#define GLUT_WINDOW_ACCUM_ALPHA_SIZE\t((GLenum) 114)\n#define GLUT_WINDOW_DOUBLEBUFFER\t((GLenum) 115)\n#define GLUT_WINDOW_RGBA\t\t((GLenum) 116)\n#define GLUT_WINDOW_PARENT\t\t((GLenum) 117)\n#define GLUT_WINDOW_NUM_CHILDREN\t((GLenum) 118)\n#define GLUT_WINDOW_COLORMAP_SIZE\t((GLenum) 119)\n#if (GLUT_API_VERSION >= 2)\n#define GLUT_WINDOW_NUM_SAMPLES\t\t((GLenum) 120)\n#define GLUT_WINDOW_STEREO\t\t((GLenum) 121)\n#endif\n#if (GLUT_API_VERSION >= 3)\n#define GLUT_WINDOW_CURSOR\t\t((GLenum) 122)\n#endif\n#define GLUT_SCREEN_WIDTH\t\t((GLenum) 200)\n#define GLUT_SCREEN_HEIGHT\t\t((GLenum) 201)\n#define GLUT_SCREEN_WIDTH_MM\t\t((GLenum) 202)\n#define GLUT_SCREEN_HEIGHT_MM\t\t((GLenum) 203)\n#define GLUT_MENU_NUM_ITEMS\t\t((GLenum) 300)\n#define GLUT_DISPLAY_MODE_POSSIBLE\t((GLenum) 400)\n#define GLUT_INIT_WINDOW_X\t\t((GLenum) 500)\n#define GLUT_INIT_WINDOW_Y\t\t((GLenum) 501)\n#define GLUT_INIT_WINDOW_WIDTH\t\t((GLenum) 502)\n#define GLUT_INIT_WINDOW_HEIGHT\t\t((GLenum) 503)\n#define GLUT_INIT_DISPLAY_MODE\t\t((GLenum) 504)\n#if (GLUT_API_VERSION >= 2)\n#define GLUT_ELAPSED_TIME\t\t((GLenum) 700)\n#endif\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 13)\n#define GLUT_WINDOW_FORMAT_ID\t\t((GLenum) 123)\n#endif\n\n#if (GLUT_API_VERSION >= 2)\n/* glutDeviceGet parameters. 
*/\n#define GLUT_HAS_KEYBOARD\t\t((GLenum) 600)\n#define GLUT_HAS_MOUSE\t\t\t((GLenum) 601)\n#define GLUT_HAS_SPACEBALL\t\t((GLenum) 602)\n#define GLUT_HAS_DIAL_AND_BUTTON_BOX\t((GLenum) 603)\n#define GLUT_HAS_TABLET\t\t\t((GLenum) 604)\n#define GLUT_NUM_MOUSE_BUTTONS\t\t((GLenum) 605)\n#define GLUT_NUM_SPACEBALL_BUTTONS\t((GLenum) 606)\n#define GLUT_NUM_BUTTON_BOX_BUTTONS\t((GLenum) 607)\n#define GLUT_NUM_DIALS\t\t\t((GLenum) 608)\n#define GLUT_NUM_TABLET_BUTTONS\t\t((GLenum) 609)\n#endif\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 13)\n#define GLUT_DEVICE_IGNORE_KEY_REPEAT   ((GLenum) 610)\n#define GLUT_DEVICE_KEY_REPEAT          ((GLenum) 611)\n#define GLUT_HAS_JOYSTICK\t\t((GLenum) 612)\n#define GLUT_OWNS_JOYSTICK\t\t((GLenum) 613)\n#define GLUT_JOYSTICK_BUTTONS\t\t((GLenum) 614)\n#define GLUT_JOYSTICK_AXES\t\t((GLenum) 615)\n#define GLUT_JOYSTICK_POLL_RATE\t\t((GLenum) 616)\n#endif\n\n#if (GLUT_API_VERSION >= 3)\n/* glutLayerGet parameters. */\n#define GLUT_OVERLAY_POSSIBLE           ((GLenum) 800)\n#define GLUT_LAYER_IN_USE\t\t((GLenum) 801)\n#define GLUT_HAS_OVERLAY\t\t((GLenum) 802)\n#define GLUT_TRANSPARENT_INDEX\t\t((GLenum) 803)\n#define GLUT_NORMAL_DAMAGED\t\t((GLenum) 804)\n#define GLUT_OVERLAY_DAMAGED\t\t((GLenum) 805)\n\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 9)\n/* glutVideoResizeGet parameters. */\n#define GLUT_VIDEO_RESIZE_POSSIBLE\t((GLenum) 900)\n#define GLUT_VIDEO_RESIZE_IN_USE\t((GLenum) 901)\n#define GLUT_VIDEO_RESIZE_X_DELTA\t((GLenum) 902)\n#define GLUT_VIDEO_RESIZE_Y_DELTA\t((GLenum) 903)\n#define GLUT_VIDEO_RESIZE_WIDTH_DELTA\t((GLenum) 904)\n#define GLUT_VIDEO_RESIZE_HEIGHT_DELTA\t((GLenum) 905)\n#define GLUT_VIDEO_RESIZE_X\t\t((GLenum) 906)\n#define GLUT_VIDEO_RESIZE_Y\t\t((GLenum) 907)\n#define GLUT_VIDEO_RESIZE_WIDTH\t\t((GLenum) 908)\n#define GLUT_VIDEO_RESIZE_HEIGHT\t((GLenum) 909)\n#endif\n\n/* glutUseLayer parameters. */\n#define GLUT_NORMAL\t\t\t((GLenum) 0)\n#define GLUT_OVERLAY\t\t\t((GLenum) 1)\n\n/* glutGetModifiers return mask. */\n#define GLUT_ACTIVE_SHIFT               1\n#define GLUT_ACTIVE_CTRL                2\n#define GLUT_ACTIVE_ALT                 4\n\n/* glutSetCursor parameters. */\n/* Basic arrows. */\n#define GLUT_CURSOR_RIGHT_ARROW\t\t0\n#define GLUT_CURSOR_LEFT_ARROW\t\t1\n/* Symbolic cursor shapes. */\n#define GLUT_CURSOR_INFO\t\t2\n#define GLUT_CURSOR_DESTROY\t\t3\n#define GLUT_CURSOR_HELP\t\t4\n#define GLUT_CURSOR_CYCLE\t\t5\n#define GLUT_CURSOR_SPRAY\t\t6\n#define GLUT_CURSOR_WAIT\t\t7\n#define GLUT_CURSOR_TEXT\t\t8\n#define GLUT_CURSOR_CROSSHAIR\t\t9\n/* Directional cursors. */\n#define GLUT_CURSOR_UP_DOWN\t\t10\n#define GLUT_CURSOR_LEFT_RIGHT\t\t11\n/* Sizing cursors. */\n#define GLUT_CURSOR_TOP_SIDE\t\t12\n#define GLUT_CURSOR_BOTTOM_SIDE\t\t13\n#define GLUT_CURSOR_LEFT_SIDE\t\t14\n#define GLUT_CURSOR_RIGHT_SIDE\t\t15\n#define GLUT_CURSOR_TOP_LEFT_CORNER\t16\n#define GLUT_CURSOR_TOP_RIGHT_CORNER\t17\n#define GLUT_CURSOR_BOTTOM_RIGHT_CORNER\t18\n#define GLUT_CURSOR_BOTTOM_LEFT_CORNER\t19\n/* Inherit from parent window. */\n#define GLUT_CURSOR_INHERIT\t\t100\n/* Blank cursor. */\n#define GLUT_CURSOR_NONE\t\t101\n/* Fullscreen crosshair (if available). */\n#define GLUT_CURSOR_FULL_CROSSHAIR\t102\n#endif\n\n/* GLUT initialization sub-API. 
*/\nGLUTAPI void APIENTRY glutInit(int *argcp, char **argv);\n#if defined(_WIN32) && !defined(GLUT_DISABLE_ATEXIT_HACK)\nGLUTAPI void APIENTRY __glutInitWithExit(int *argcp, char **argv, void (__cdecl *exitfunc)(int));\n#ifndef GLUT_BUILDING_LIB\nstatic void APIENTRY glutInit_ATEXIT_HACK(int *argcp, char **argv) { __glutInitWithExit(argcp, argv, exit); }\n#define glutInit glutInit_ATEXIT_HACK\n#endif\n#endif\nGLUTAPI void APIENTRY glutInitDisplayMode(unsigned int mode);\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 9)\nGLUTAPI void APIENTRY glutInitDisplayString(const char *string);\n#endif\nGLUTAPI void APIENTRY glutInitWindowPosition(int x, int y);\nGLUTAPI void APIENTRY glutInitWindowSize(int width, int height);\nGLUTAPI void APIENTRY glutMainLoop(void);\n\n/* GLUT window sub-API. */\nGLUTAPI int APIENTRY glutCreateWindow(const char *title);\n#if defined(_WIN32) && !defined(GLUT_DISABLE_ATEXIT_HACK)\nGLUTAPI int APIENTRY __glutCreateWindowWithExit(const char *title, void (__cdecl *exitfunc)(int));\n#ifndef GLUT_BUILDING_LIB\nstatic int APIENTRY glutCreateWindow_ATEXIT_HACK(const char *title) { return __glutCreateWindowWithExit(title, exit); }\n#define glutCreateWindow glutCreateWindow_ATEXIT_HACK\n#endif\n#endif\nGLUTAPI int APIENTRY glutCreateSubWindow(int win, int x, int y, int width, int height);\nGLUTAPI void APIENTRY glutDestroyWindow(int win);\nGLUTAPI void APIENTRY glutPostRedisplay(void);\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 11)\nGLUTAPI void APIENTRY glutPostWindowRedisplay(int win);\n#endif\nGLUTAPI void APIENTRY glutSwapBuffers(void);\nGLUTAPI int APIENTRY glutGetWindow(void);\nGLUTAPI void APIENTRY glutSetWindow(int win);\nGLUTAPI void APIENTRY glutSetWindowTitle(const char *title);\nGLUTAPI void APIENTRY glutSetIconTitle(const char *title);\nGLUTAPI void APIENTRY glutPositionWindow(int x, int y);\nGLUTAPI void APIENTRY glutReshapeWindow(int width, int height);\nGLUTAPI void APIENTRY glutPopWindow(void);\nGLUTAPI void APIENTRY glutPushWindow(void);\nGLUTAPI void APIENTRY glutIconifyWindow(void);\nGLUTAPI void APIENTRY glutShowWindow(void);\nGLUTAPI void APIENTRY glutHideWindow(void);\n#if (GLUT_API_VERSION >= 3)\nGLUTAPI void APIENTRY glutFullScreen(void);\nGLUTAPI void APIENTRY glutSetCursor(int cursor);\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 9)\nGLUTAPI void APIENTRY glutWarpPointer(int x, int y);\n#endif\n\n/* GLUT overlay sub-API. */\nGLUTAPI void APIENTRY glutEstablishOverlay(void);\nGLUTAPI void APIENTRY glutRemoveOverlay(void);\nGLUTAPI void APIENTRY glutUseLayer(GLenum layer);\nGLUTAPI void APIENTRY glutPostOverlayRedisplay(void);\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 11)\nGLUTAPI void APIENTRY glutPostWindowOverlayRedisplay(int win);\n#endif\nGLUTAPI void APIENTRY glutShowOverlay(void);\nGLUTAPI void APIENTRY glutHideOverlay(void);\n#endif\n\n/* GLUT menu sub-API. 
*/\nGLUTAPI int APIENTRY glutCreateMenu(void (GLUTCALLBACK *func)(int));\n#if defined(_WIN32) && !defined(GLUT_DISABLE_ATEXIT_HACK)\nGLUTAPI int APIENTRY __glutCreateMenuWithExit(void (GLUTCALLBACK *func)(int), void (__cdecl *exitfunc)(int));\n#ifndef GLUT_BUILDING_LIB\nstatic int APIENTRY glutCreateMenu_ATEXIT_HACK(void (GLUTCALLBACK *func)(int)) { return __glutCreateMenuWithExit(func, exit); }\n#define glutCreateMenu glutCreateMenu_ATEXIT_HACK\n#endif\n#endif\nGLUTAPI void APIENTRY glutDestroyMenu(int menu);\nGLUTAPI int APIENTRY glutGetMenu(void);\nGLUTAPI void APIENTRY glutSetMenu(int menu);\nGLUTAPI void APIENTRY glutAddMenuEntry(const char *label, int value);\nGLUTAPI void APIENTRY glutAddSubMenu(const char *label, int submenu);\nGLUTAPI void APIENTRY glutChangeToMenuEntry(int item, const char *label, int value);\nGLUTAPI void APIENTRY glutChangeToSubMenu(int item, const char *label, int submenu);\nGLUTAPI void APIENTRY glutRemoveMenuItem(int item);\nGLUTAPI void APIENTRY glutAttachMenu(int button);\nGLUTAPI void APIENTRY glutDetachMenu(int button);\n\n/* GLUT window callback sub-API. */\nGLUTAPI void APIENTRY glutDisplayFunc(void (GLUTCALLBACK *func)(void));\nGLUTAPI void APIENTRY glutReshapeFunc(void (GLUTCALLBACK *func)(int width, int height));\nGLUTAPI void APIENTRY glutKeyboardFunc(void (GLUTCALLBACK *func)(unsigned char key, int x, int y));\nGLUTAPI void APIENTRY glutMouseFunc(void (GLUTCALLBACK *func)(int button, int state, int x, int y));\nGLUTAPI void APIENTRY glutMotionFunc(void (GLUTCALLBACK *func)(int x, int y));\nGLUTAPI void APIENTRY glutPassiveMotionFunc(void (GLUTCALLBACK *func)(int x, int y));\nGLUTAPI void APIENTRY glutEntryFunc(void (GLUTCALLBACK *func)(int state));\nGLUTAPI void APIENTRY glutVisibilityFunc(void (GLUTCALLBACK *func)(int state));\nGLUTAPI void APIENTRY glutIdleFunc(void (GLUTCALLBACK *func)(void));\nGLUTAPI void APIENTRY glutTimerFunc(unsigned int millis, void (GLUTCALLBACK *func)(int value), int value);\nGLUTAPI void APIENTRY glutMenuStateFunc(void (GLUTCALLBACK *func)(int state));\n#if (GLUT_API_VERSION >= 2)\nGLUTAPI void APIENTRY glutSpecialFunc(void (GLUTCALLBACK *func)(int key, int x, int y));\nGLUTAPI void APIENTRY glutSpaceballMotionFunc(void (GLUTCALLBACK *func)(int x, int y, int z));\nGLUTAPI void APIENTRY glutSpaceballRotateFunc(void (GLUTCALLBACK *func)(int x, int y, int z));\nGLUTAPI void APIENTRY glutSpaceballButtonFunc(void (GLUTCALLBACK *func)(int button, int state));\nGLUTAPI void APIENTRY glutButtonBoxFunc(void (GLUTCALLBACK *func)(int button, int state));\nGLUTAPI void APIENTRY glutDialsFunc(void (GLUTCALLBACK *func)(int dial, int value));\nGLUTAPI void APIENTRY glutTabletMotionFunc(void (GLUTCALLBACK *func)(int x, int y));\nGLUTAPI void APIENTRY glutTabletButtonFunc(void (GLUTCALLBACK *func)(int button, int state, int x, int y));\n#if (GLUT_API_VERSION >= 3)\nGLUTAPI void APIENTRY glutMenuStatusFunc(void (GLUTCALLBACK *func)(int status, int x, int y));\nGLUTAPI void APIENTRY glutOverlayDisplayFunc(void (GLUTCALLBACK *func)(void));\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 9)\nGLUTAPI void APIENTRY glutWindowStatusFunc(void (GLUTCALLBACK *func)(int state));\n#endif\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 13)\nGLUTAPI void APIENTRY glutKeyboardUpFunc(void (GLUTCALLBACK *func)(unsigned char key, int x, int y));\nGLUTAPI void APIENTRY glutSpecialUpFunc(void (GLUTCALLBACK *func)(int key, int x, int y));\nGLUTAPI void APIENTRY glutJoystickFunc(void (GLUTCALLBACK *func)(unsigned int buttonMask, int x, 
int y, int z), int pollInterval);\n#endif\n#endif\n#endif\n\n/* GLUT color index sub-API. */\nGLUTAPI void APIENTRY glutSetColor(int, GLfloat red, GLfloat green, GLfloat blue);\nGLUTAPI GLfloat APIENTRY glutGetColor(int ndx, int component);\nGLUTAPI void APIENTRY glutCopyColormap(int win);\n\n/* GLUT state retrieval sub-API. */\nGLUTAPI int APIENTRY glutGet(GLenum type);\nGLUTAPI int APIENTRY glutDeviceGet(GLenum type);\n#if (GLUT_API_VERSION >= 2)\n/* GLUT extension support sub-API */\nGLUTAPI int APIENTRY glutExtensionSupported(const char *name);\n#endif\n#if (GLUT_API_VERSION >= 3)\nGLUTAPI int APIENTRY glutGetModifiers(void);\nGLUTAPI int APIENTRY glutLayerGet(GLenum type);\n#endif\n\n/* GLUT font sub-API */\nGLUTAPI void APIENTRY glutBitmapCharacter(void *font, int character);\nGLUTAPI int APIENTRY glutBitmapWidth(void *font, int character);\nGLUTAPI void APIENTRY glutStrokeCharacter(void *font, int character);\nGLUTAPI int APIENTRY glutStrokeWidth(void *font, int character);\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 9)\nGLUTAPI int APIENTRY glutBitmapLength(void *font, const unsigned char *string);\nGLUTAPI int APIENTRY glutStrokeLength(void *font, const unsigned char *string);\n#endif\n\n/* GLUT pre-built models sub-API */\nGLUTAPI void APIENTRY glutWireSphere(GLdouble radius, GLint slices, GLint stacks);\nGLUTAPI void APIENTRY glutSolidSphere(GLdouble radius, GLint slices, GLint stacks);\nGLUTAPI void APIENTRY glutWireCone(GLdouble base, GLdouble height, GLint slices, GLint stacks);\nGLUTAPI void APIENTRY glutSolidCone(GLdouble base, GLdouble height, GLint slices, GLint stacks);\nGLUTAPI void APIENTRY glutWireCube(GLdouble size);\nGLUTAPI void APIENTRY glutSolidCube(GLdouble size);\nGLUTAPI void APIENTRY glutWireTorus(GLdouble innerRadius, GLdouble outerRadius, GLint sides, GLint rings);\nGLUTAPI void APIENTRY glutSolidTorus(GLdouble innerRadius, GLdouble outerRadius, GLint sides, GLint rings);\nGLUTAPI void APIENTRY glutWireDodecahedron(void);\nGLUTAPI void APIENTRY glutSolidDodecahedron(void);\nGLUTAPI void APIENTRY glutWireTeapot(GLdouble size);\nGLUTAPI void APIENTRY glutSolidTeapot(GLdouble size);\nGLUTAPI void APIENTRY glutWireOctahedron(void);\nGLUTAPI void APIENTRY glutSolidOctahedron(void);\nGLUTAPI void APIENTRY glutWireTetrahedron(void);\nGLUTAPI void APIENTRY glutSolidTetrahedron(void);\nGLUTAPI void APIENTRY glutWireIcosahedron(void);\nGLUTAPI void APIENTRY glutSolidIcosahedron(void);\n\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 9)\n/* GLUT video resize sub-API. */\nGLUTAPI int APIENTRY glutVideoResizeGet(GLenum param);\nGLUTAPI void APIENTRY glutSetupVideoResizing(void);\nGLUTAPI void APIENTRY glutStopVideoResizing(void);\nGLUTAPI void APIENTRY glutVideoResize(int x, int y, int width, int height);\nGLUTAPI void APIENTRY glutVideoPan(int x, int y, int width, int height);\n\n/* GLUT debugging sub-API. */\nGLUTAPI void APIENTRY glutReportErrors(void);\n#endif\n\n#if (GLUT_API_VERSION >= 4 || GLUT_XLIB_IMPLEMENTATION >= 13)\n/* GLUT device control sub-API. */\n/* glutSetKeyRepeat modes. */\n#define GLUT_KEY_REPEAT_OFF\t\t0\n#define GLUT_KEY_REPEAT_ON\t\t1\n#define GLUT_KEY_REPEAT_DEFAULT\t\t2\n\n/* Joystick button masks. 
*/\n#define GLUT_JOYSTICK_BUTTON_A\t\t1\n#define GLUT_JOYSTICK_BUTTON_B\t\t2\n#define GLUT_JOYSTICK_BUTTON_C\t\t4\n#define GLUT_JOYSTICK_BUTTON_D\t\t8\n\nGLUTAPI void APIENTRY glutIgnoreKeyRepeat(int ignore);\nGLUTAPI void APIENTRY glutSetKeyRepeat(int repeatMode);\nGLUTAPI void APIENTRY glutForceJoystickFunc(void);\n\n/* GLUT game mode sub-API. */\n/* glutGameModeGet. */\n#define GLUT_GAME_MODE_ACTIVE           ((GLenum) 0)\n#define GLUT_GAME_MODE_POSSIBLE         ((GLenum) 1)\n#define GLUT_GAME_MODE_WIDTH            ((GLenum) 2)\n#define GLUT_GAME_MODE_HEIGHT           ((GLenum) 3)\n#define GLUT_GAME_MODE_PIXEL_DEPTH      ((GLenum) 4)\n#define GLUT_GAME_MODE_REFRESH_RATE     ((GLenum) 5)\n#define GLUT_GAME_MODE_DISPLAY_CHANGED  ((GLenum) 6)\n\nGLUTAPI void APIENTRY glutGameModeString(const char *string);\nGLUTAPI int APIENTRY glutEnterGameMode(void);\nGLUTAPI void APIENTRY glutLeaveGameMode(void);\nGLUTAPI int APIENTRY glutGameModeGet(GLenum mode);\n#endif\n\n#ifdef __cplusplus\n}\n\n#endif\n\n#ifdef GLUT_APIENTRY_DEFINED\n# undef GLUT_APIENTRY_DEFINED\n# undef APIENTRY\n#endif\n\n#ifdef GLUT_WINGDIAPI_DEFINED\n# undef GLUT_WINGDIAPI_DEFINED\n# undef WINGDIAPI\n#endif\n\n#ifdef GLUT_DEFINED___CDECL\n# undef GLUT_DEFINED___CDECL\n# undef __cdecl\n#endif\n\n#ifdef GLUT_DEFINED__CRTIMP\n# undef GLUT_DEFINED__CRTIMP\n# undef _CRTIMP\n#endif\n\n#endif                  /* __glut_h__ */\n"
  },
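The glut.h header above declares GLUT's initialization, window, callback, menu, and font sub-APIs. As a quick orientation, here is a minimal usage sketch that ties them together; it is not part of the vendored header, uses only entry points declared above, and assumes GLUT and OpenGL are linked (e.g. glut32.lib on Windows, -lglut elsewhere).

```cpp
// Minimal GLUT program built from the sub-APIs declared above (a sketch, not
// part of the vendored header). GLUT_DOUBLE/GLUT_RGB are the display-mode
// masks defined earlier in glut.h.
#include <GL/glut.h>

static void display(void)
{
    glClear(GL_COLOR_BUFFER_BIT);
    glRasterPos2f(-0.9f, 0.0f);
    for (const char *p = "hello, GLUT"; *p != '\0'; ++p)
        glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, *p); // bitmap font sub-API
    glutSwapBuffers();                                     // double-buffered, so swap
}

int main(int argc, char **argv)
{
    glutInit(&argc, argv);        // on Win32 this may resolve to glutInit_ATEXIT_HACK
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
    glutInitWindowSize(640, 480);
    glutCreateWindow("glut sketch");
    glutDisplayFunc(display);     // register the display callback
    glutMainLoop();               // enters the event loop; never returns
    return 0;
}
```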
  {
    "path": "src/3rdparty/glut/lib/glut.def",
    "content": "DESCRIPTION 'OpenGL Utility Toolkit for Win32'\n\nVERSION 3.7\n\nEXPORTS\n\n\tglutAddMenuEntry\n\tglutAddSubMenu\n\tglutAttachMenu\n\tglutBitmapCharacter\n\tglutBitmapLength\n\tglutBitmapWidth\n\tglutButtonBoxFunc\n\tglutChangeToMenuEntry\n\tglutChangeToSubMenu\n\tglutCopyColormap\n\tglutCreateMenu\n\t__glutCreateMenuWithExit\n\tglutCreateSubWindow\n\tglutCreateWindow\n\t__glutCreateWindowWithExit\n\tglutDestroyMenu\n\tglutDestroyWindow\n\tglutDetachMenu\n\tglutDeviceGet\n\tglutDialsFunc\n\tglutDisplayFunc\n\tglutEnterGameMode\n\tglutEntryFunc\n\tglutEstablishOverlay\n\tglutExtensionSupported\n\tglutForceJoystickFunc\n\tglutFullScreen\n\tglutGameModeGet\n\tglutGameModeString\n\tglutGet\n\tglutGetColor\n\tglutGetMenu\n\tglutGetModifiers\n\tglutGetWindow\n\tglutHideOverlay\n\tglutHideWindow\n\tglutIconifyWindow\n\tglutIdleFunc\n\tglutIgnoreKeyRepeat\n\tglutInit\n\t__glutInitWithExit\n\tglutInitDisplayMode\n\tglutInitDisplayString\n\tglutInitWindowPosition\n\tglutInitWindowSize\n\tglutJoystickFunc\n\tglutKeyboardFunc\n\tglutKeyboardUpFunc\n\tglutLayerGet\n\tglutLeaveGameMode\n\tglutMainLoop\n\tglutMenuStateFunc\n\tglutMenuStatusFunc\n\tglutMotionFunc\n\tglutMouseFunc\n\tglutOverlayDisplayFunc\n\tglutPassiveMotionFunc\n\tglutPopWindow\n\tglutPositionWindow\n\tglutPostOverlayRedisplay\n\tglutPostRedisplay\n\tglutPostWindowOverlayRedisplay\n\tglutPostWindowRedisplay\n\tglutPushWindow\n\tglutRemoveMenuItem\n\tglutRemoveOverlay\n\tglutReportErrors\n\tglutReshapeFunc\n\tglutReshapeWindow\n\tglutSetColor\n\tglutSetCursor\n\tglutSetIconTitle\n\tglutSetKeyRepeat\n\tglutSetMenu\n\tglutSetWindow\n\tglutSetWindowTitle\n\tglutSetupVideoResizing\n\tglutShowOverlay\n\tglutShowWindow\n\tglutSolidCone\n\tglutSolidCube\n\tglutSolidDodecahedron\n\tglutSolidIcosahedron\n\tglutSolidOctahedron\n\tglutSolidSphere\n\tglutSolidTeapot\n\tglutSolidTetrahedron\n\tglutSolidTorus\n\tglutSpaceballButtonFunc\n\tglutSpaceballMotionFunc\n\tglutSpaceballRotateFunc\n\tglutSpecialFunc\n\tglutSpecialUpFunc\n\tglutStopVideoResizing\n\tglutStrokeCharacter\n\tglutStrokeLength\n\tglutStrokeWidth\n\tglutSwapBuffers\n\tglutTabletButtonFunc\n\tglutTabletMotionFunc\n\tglutTimerFunc\n\tglutUseLayer\n\tglutVideoPan\n\tglutVideoResize\n\tglutVideoResizeGet\n\tglutVisibilityFunc\n\tglutWarpPointer\n\tglutWindowStatusFunc\n\tglutWireCone\n\tglutWireCube\n\tglutWireDodecahedron\n\tglutWireIcosahedron\n\tglutWireOctahedron\n\tglutWireSphere\n\tglutWireTeapot\n\tglutWireTetrahedron\n\tglutWireTorus\n;    __glutSetFCB\n;    __glutGetFCB\n\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cv.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_CV_H__\n#define __OPENCV_OLD_CV_H__\n\n#if defined(_MSC_VER)\n    #define CV_DO_PRAGMA(x) __pragma(x)\n    #define __CVSTR2__(x) #x\n    #define __CVSTR1__(x) __CVSTR2__(x)\n    #define __CVMSVCLOC__ __FILE__ \"(\"__CVSTR1__(__LINE__)\") : \"\n    #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (__CVMSVCLOC__ _msg))\n#elif defined(__GNUC__)\n    #define CV_DO_PRAGMA(x) _Pragma (#x)\n    #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (_msg))\n#else\n    #define CV_DO_PRAGMA(x)\n    #define CV_MSG_PRAGMA(_msg)\n#endif\n#define CV_WARNING(x) CV_MSG_PRAGMA(\"Warning: \" #x)\n\n//CV_WARNING(\"This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module\")\n\n#include \"opencv2/core/core_c.h\"\n#include \"opencv2/imgproc/imgproc_c.h\"\n#include \"opencv2/photo/photo_c.h\"\n#include \"opencv2/video/tracking_c.h\"\n#include \"opencv2/objdetect/objdetect_c.h\"\n\n#if !defined(CV_IMPL)\n#define CV_IMPL extern \"C\"\n#endif //CV_IMPL\n\n#endif // __OPENCV_OLD_CV_H_\n"
  },
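The CV_DO_PRAGMA/CV_MSG_PRAGMA block in cv.h above is the classic two-step stringification trick for emitting compile-time messages, with an MSVC-specific "file(line) : " prefix so the message is clickable in the IDE's output window. A standalone sketch of the same technique (COMPILE_NOTE and NOTE_STR* are hypothetical names, not OpenCV APIs):

```cpp
// Two-step stringification so __LINE__ expands before being stringized;
// mirrors the CV_MSG_PRAGMA machinery in the header above.
#define NOTE_STR2(x) #x
#define NOTE_STR1(x) NOTE_STR2(x)
#if defined(_MSC_VER)
#  define COMPILE_NOTE(msg) __pragma(message(__FILE__ "(" NOTE_STR1(__LINE__) ") : " msg))
#elif defined(__GNUC__)
#  define COMPILE_NOTE(msg) _Pragma(NOTE_STR2(message(msg)))
#else
#  define COMPILE_NOTE(msg)  // silently ignored elsewhere
#endif

COMPILE_NOTE("building against the legacy cv.h compatibility layer")

int main(void) { return 0; }
```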
  {
    "path": "src/3rdparty/opencv/include/opencv/cv.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_CV_HPP__\n#define __OPENCV_OLD_CV_HPP__\n\n//#if defined(__GNUC__)\n//#warning \"This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module\"\n//#endif\n\n#include \"cv.h\"\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n#include \"opencv2/photo.hpp\"\n#include \"opencv2/video.hpp\"\n#include \"opencv2/highgui.hpp\"\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/calib3d.hpp\"\n#include \"opencv2/objdetect.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cvaux.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_AUX_H__\n#define __OPENCV_OLD_AUX_H__\n\n//#if defined(__GNUC__)\n//#warning \"This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module\"\n//#endif\n\n#include \"opencv2/core/core_c.h\"\n#include \"opencv2/imgproc/imgproc_c.h\"\n#include \"opencv2/photo/photo_c.h\"\n#include \"opencv2/video/tracking_c.h\"\n#include \"opencv2/objdetect/objdetect_c.h\"\n\n#endif\n\n/* End of file. */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cvaux.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_AUX_HPP__\n#define __OPENCV_OLD_AUX_HPP__\n\n//#if defined(__GNUC__)\n//#warning \"This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module\"\n//#endif\n\n#include \"cvaux.h\"\n#include \"opencv2/core/utility.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cvwimage.h",
    "content": "///////////////////////////////////////////////////////////////////////////////\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to\n//  this license.  If you do not agree to this license, do not download,\n//  install, copy or use the software.\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2008, Google, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n//  * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//  * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//  * The name of Intel Corporation or contributors may not be used to endorse\n//     or promote products derived from this software without specific\n//     prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\"\n// and any express or implied warranties, including, but not limited to, the\n// implied warranties of merchantability and fitness for a particular purpose\n// are disclaimed. In no event shall the Intel Corporation or contributors be\n// liable for any direct, indirect, incidental, special, exemplary, or\n// consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n\n\n#ifndef __OPENCV_OLD_WIMAGE_HPP__\n#define __OPENCV_OLD_WIMAGE_HPP__\n\n#include \"opencv2/core/wimage.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cxcore.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_CXCORE_H__\n#define __OPENCV_OLD_CXCORE_H__\n\n//#if defined(__GNUC__)\n//#warning \"This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module\"\n//#endif\n\n#include \"opencv2/core/core_c.h\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cxcore.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_CXCORE_HPP__\n#define __OPENCV_OLD_CXCORE_HPP__\n\n//#if defined(__GNUC__)\n//#warning \"This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module\"\n//#endif\n\n#include \"cxcore.h\"\n#include \"opencv2/core.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cxeigen.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_EIGEN_HPP__\n#define __OPENCV_OLD_EIGEN_HPP__\n\n#include \"opencv2/core/eigen.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/cxmisc.h",
    "content": "#ifndef __OPENCV_OLD_CXMISC_H__\n#define __OPENCV_OLD_CXMISC_H__\n\n#ifdef __cplusplus\n#  include \"opencv2/core/utility.hpp\"\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/highgui.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_HIGHGUI_H__\n#define __OPENCV_OLD_HIGHGUI_H__\n\n#include \"opencv2/core/core_c.h\"\n#include \"opencv2/highgui/highgui_c.h\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv/ml.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OLD_ML_H__\n#define __OPENCV_OLD_ML_H__\n\n#include \"opencv2/core/core_c.h\"\n#include \"opencv2/ml.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/aruco/charuco.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_CHARUCO_HPP__\n#define __OPENCV_CHARUCO_HPP__\n\n#include <opencv2/core.hpp>\n#include <vector>\n#include <opencv2/aruco.hpp>\n\n\nnamespace cv {\nnamespace aruco {\n\n//! @addtogroup aruco\n//! @{\n\n\n/**\n * @brief ChArUco board\n * Specific class for ChArUco boards. A ChArUco board is a planar board where the markers are placed\n * inside the white squares of a chessboard. The benefits of ChArUco boards is that they provide\n * both, ArUco markers versatility and chessboard corner precision, which is important for\n * calibration and pose estimation.\n * This class also allows the easy creation and drawing of ChArUco boards.\n */\nclass CV_EXPORTS CharucoBoard : public Board {\n\n    public:\n    // vector of chessboard 3D corners precalculated\n    std::vector< Point3f > chessboardCorners;\n\n    // for each charuco corner, nearest marker id and nearest marker corner id of each marker\n    std::vector< std::vector< int > > nearestMarkerIdx;\n    std::vector< std::vector< int > > nearestMarkerCorners;\n\n    /**\n     * @brief Draw a ChArUco board\n     *\n     * @param outSize size of the output image in pixels.\n     * @param img output image with the board. 
The size of this image will be outSize\n     * and the board will be on the center, keeping the board proportions.\n     * @param marginSize minimum margins (in pixels) of the board in the output image\n     * @param borderBits width of the marker borders.\n     *\n     * This function return the image of the ChArUco board, ready to be printed.\n     */\n    void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);\n\n\n    /**\n     * @brief Create a CharucoBoard object\n     *\n     * @param squaresX number of chessboard squares in X direction\n     * @param squaresY number of chessboard squares in Y direction\n     * @param squareLength chessboard square side length (normally in meters)\n     * @param markerLength marker side length (same unit than squareLength)\n     * @param dictionary dictionary of markers indicating the type of markers.\n     * The first markers in the dictionary are used to fill the white chessboard squares.\n     * @return the output CharucoBoard object\n     *\n     * This functions creates a CharucoBoard object given the number of squares in each direction\n     * and the size of the markers and chessboard squares.\n     */\n    static CharucoBoard create(int squaresX, int squaresY, float squareLength, float markerLength,\n                               Dictionary dictionary);\n\n    /**\n      *\n      */\n    Size getChessboardSize() const { return Size(_squaresX, _squaresY); }\n\n    /**\n      *\n      */\n    float getSquareLength() const { return _squareLength; }\n\n    /**\n      *\n      */\n    float getMarkerLength() const { return _markerLength; }\n\n    private:\n    void _getNearestMarkerCorners();\n\n    // number of markers in X and Y directions\n    int _squaresX, _squaresY;\n\n    // size of chessboard squares side (normally in meters)\n    float _squareLength;\n\n    // marker side lenght (normally in meters)\n    float _markerLength;\n};\n\n\n\n\n/**\n * @brief Interpolate position of ChArUco board corners\n * @param markerCorners vector of already detected markers corners. For each marker, its four\n * corners are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the\n * dimensions of this array should be Nx4. The order of the corners should be clockwise.\n * @param markerIds list of identifiers for each marker in corners\n * @param image input image necesary for corner refinement. Note that markers are not detected and\n * should be sent in corners and ids parameters.\n * @param board layout of ChArUco board.\n * @param charucoCorners interpolated chessboard corners\n * @param charucoIds interpolated chessboard corners identifiers\n * @param cameraMatrix optional 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$\n * @param distCoeffs optional vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n *\n * This function receives the detected markers and returns the 2D position of the chessboard corners\n * from a ChArUco board using the detected Aruco markers. If camera parameters are provided,\n * the process is based in an approximated pose estimation, else it is based on local homography.\n * Only visible corners are returned. 
For each corner, its corresponding identifier is\n * also returned in charucoIds.\n * The function returns the number of interpolated corners.\n */\nCV_EXPORTS int interpolateCornersCharuco(InputArrayOfArrays markerCorners, InputArray markerIds,\n                                         InputArray image, const CharucoBoard &board,\n                                         OutputArray charucoCorners, OutputArray charucoIds,\n                                         InputArray cameraMatrix = noArray(),\n                                         InputArray distCoeffs = noArray());\n\n\n\n\n/**\n * @brief Pose estimation for a ChArUco board given some of their corners\n * @param charucoCorners vector of detected charuco corners\n * @param charucoIds list of identifiers for each corner in charucoCorners\n * @param board layout of ChArUco board.\n * @param cameraMatrix input 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$\n * @param distCoeffs vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board\n * (@sa Rodrigues).\n * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.\n *\n * This function estimates a Charuco board pose from some detected corners.\n * The function checks if the input corners are enough and valid to perform pose estimation.\n * If pose estimation is valid, returns true, else returns false.\n */\nCV_EXPORTS bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds,\n                                         CharucoBoard &board, InputArray cameraMatrix,\n                                         InputArray distCoeffs, OutputArray rvec, OutputArray tvec);\n\n\n\n\n/**\n * @brief Draws a set of Charuco corners\n * @param image input/output image. It must have 1 or 3 channels. The number of channels is not\n * altered.\n * @param charucoCorners vector of detected charuco corners\n * @param charucoIds list of identifiers for each corner in charucoCorners\n * @param cornerColor color of the square surrounding each corner\n *\n * This function draws a set of detected Charuco corners. If identifiers vector is provided, it also\n * draws the id of each corner.\n */\nCV_EXPORTS void drawDetectedCornersCharuco(InputOutputArray image, InputArray charucoCorners,\n                                           InputArray charucoIds = noArray(),\n                                           Scalar cornerColor = Scalar(255, 0, 0));\n\n\n\n/**\n * @brief Calibrate a camera using Charuco corners\n *\n * @param charucoCorners vector of detected charuco corners per frame\n * @param charucoIds list of identifiers for each corner in charucoCorners per frame\n * @param board Marker Board layout\n * @param imageSize input image size\n * @param cameraMatrix Output 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ . If CV\\_CALIB\\_USE\\_INTRINSIC\\_GUESS\n * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be\n * initialized before calling the function.\n * @param distCoeffs Output vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view\n * (e.g. 
std::vector<cv::Mat>>). That is, each k-th rotation vector together with the corresponding\n * k-th translation vector (see the next output parameter description) brings the board pattern\n * from the model coordinate space (in which object points are specified) to the world coordinate\n * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).\n * @param tvecs Output vector of translation vectors estimated for each pattern view.\n * @param flags flags Different flags  for the calibration process (@sa calibrateCamera)\n * @param criteria Termination criteria for the iterative optimization algorithm.\n *\n * This function calibrates a camera using a set of corners of a  Charuco Board. The function\n * receives a list of detected corners and its identifiers from several views of the Board.\n * The function returns the final re-projection error.\n */\nCV_EXPORTS double calibrateCameraCharuco(\n    InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const CharucoBoard &board,\n    Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,\n    OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,\n    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));\n\n\n\n\n/**\n * @brief Detect ChArUco Diamond markers\n *\n * @param image input image necessary for corner subpixel.\n * @param markerCorners list of detected marker corners from detectMarkers function.\n * @param markerIds list of marker ids in markerCorners.\n * @param squareMarkerLengthRate rate between square and marker length:\n * squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.\n * @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order\n * is the same than in marker corners: top left, top right, bottom right and bottom left. Similar\n * format than the corners returned by detectMarkers (e.g std::vector<std::vector<cv::Point2f> > ).\n * @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of\n * type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the\n * diamond.\n * @param cameraMatrix Optional camera calibration matrix.\n * @param distCoeffs Optional camera distortion coefficients.\n *\n * This function detects Diamond markers from the previous detected ArUco markers. The diamonds\n * are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters\n * are provided, the diamond search is based on reprojection. If not, diamond search is based on\n * homography. Homography is faster than reprojection but can slightly reduce the detection rate.\n */\nCV_EXPORTS void detectCharucoDiamond(InputArray image, InputArrayOfArrays markerCorners,\n                                     InputArray markerIds, float squareMarkerLengthRate,\n                                     OutputArrayOfArrays diamondCorners, OutputArray diamondIds,\n                                     InputArray cameraMatrix = noArray(),\n                                     InputArray distCoeffs = noArray());\n\n\n\n/**\n * @brief Draw a set of detected ChArUco Diamond markers\n *\n * @param image input/output image. It must have 1 or 3 channels. The number of channels is not\n * altered.\n * @param diamondCorners positions of diamond corners in the same format returned by\n * detectCharucoDiamond(). (e.g std::vector<std::vector<cv::Point2f> > ). 
For N detected markers,\n * the dimensions of this array should be Nx4. The order of the corners should be clockwise.\n * @param diamondIds vector of identifiers for diamonds in diamondCorners, in the same format\n * returned by detectCharucoDiamond() (e.g. std::vector<Vec4i>).\n * Optional, if not provided, ids are not painted.\n * @param borderColor color of marker borders. Rest of colors (text color and first corner color)\n * are calculated based on this one.\n *\n * Given an array of detected diamonds, this functions draws them in the image. The marker borders\n * are painted and the markers identifiers if provided.\n * Useful for debugging purposes.\n */\nCV_EXPORTS void drawDetectedDiamonds(InputOutputArray image, InputArrayOfArrays diamondCorners,\n                                     InputArray diamondIds = noArray(),\n                                     Scalar borderColor = Scalar(0, 0, 255));\n\n\n\n\n/**\n * @brief Draw a ChArUco Diamond marker\n *\n * @param dictionary dictionary of markers indicating the type of markers.\n * @param ids list of 4 ids for each ArUco marker in the ChArUco marker.\n * @param squareLength size of the chessboard squares in pixels.\n * @param markerLength size of the markers in pixels.\n * @param img output image with the marker. The size of this image will be\n * 3*squareLength + 2*marginSize,.\n * @param marginSize minimum margins (in pixels) of the marker in the output image\n * @param borderBits width of the marker borders.\n *\n * This function return the image of a ChArUco marker, ready to be printed.\n */\nCV_EXPORTS void drawCharucoDiamond(Dictionary dictionary, Vec4i ids, int squareLength,\n                                   int markerLength, OutputArray img, int marginSize = 0,\n                                   int borderBits = 1);\n\n\n\n\n//! @}\n}\n}\n\n#endif\n"
  },
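Taken together, the declarations above form a detect, interpolate, estimate pipeline. The sketch below shows one plausible wiring; it assumes pre-calibrated intrinsics, a hypothetical 5x7 board with 4 cm squares and 2 cm markers, and the pass-by-value detectMarkers() from <opencv2/aruco.hpp> (included by this header) in the same module vintage.

```cpp
// ChArUco pose sketch (not part of the header): detect ArUco markers,
// interpolate the chessboard corners, then estimate the board pose.
#include <opencv2/aruco/charuco.hpp>
#include <vector>

bool boardPose(const cv::Mat &image, const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs,
               cv::Vec3d &rvec, cv::Vec3d &tvec)
{
    cv::aruco::Dictionary dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    cv::aruco::CharucoBoard board =
        cv::aruco::CharucoBoard::create(5, 7, 0.04f, 0.02f, dict); // squaresX, squaresY, lengths

    std::vector< std::vector< cv::Point2f > > markerCorners;       // Nx4, clockwise
    std::vector< int > markerIds;
    cv::aruco::detectMarkers(image, dict, markerCorners, markerIds);
    if (markerIds.empty()) return false;

    std::vector< cv::Point2f > charucoCorners;
    std::vector< int > charucoIds;
    int n = cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, board,
                                                 charucoCorners, charucoIds,
                                                 cameraMatrix, distCoeffs);
    if (n < 4) return false; // too few interpolated corners for a stable pose

    // Returns false when the input corners are not sufficient/valid for a pose.
    return cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board,
                                               cameraMatrix, distCoeffs, rvec, tvec);
}
```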
  {
    "path": "src/3rdparty/opencv/include/opencv2/aruco/dictionary.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_DICTIONARY_HPP__\n#define __OPENCV_DICTIONARY_HPP__\n\n#include <opencv2/core.hpp>\n\nnamespace cv {\nnamespace aruco {\n\n//! @addtogroup aruco\n//! @{\n\n\n/**\n * @brief Dictionary/Set of markers. It contains the inner codification\n *\n * bytesList contains the marker codewords where\n * - bytesList.rows is the dictionary size\n * - each marker is encoded using `nbytes = ceil(markerSize*markerSize/8.)`\n * - each row contains all 4 rotations of the marker, so its length is `4*nbytes`\n *\n * `bytesList.ptr(i)[k*nbytes + j]` is then the j-th byte of i-th marker, in its k-th rotation.\n */\nclass CV_EXPORTS Dictionary {\n\n    public:\n    Mat bytesList;         // marker code information\n    int markerSize;        // number of bits per dimension\n    int maxCorrectionBits; // maximum number of bits that can be corrected\n\n\n    /**\n      */\n    Dictionary(const Mat &_bytesList = Mat(), int _markerSize = 0, int _maxcorr = 0);\n\n\n\n    /**\n     * @brief Given a matrix of bits. Returns whether if marker is identified or not.\n     * It returns by reference the correct id (if any) and the correct rotation\n     */\n    bool identify(const Mat &onlyBits, int &idx, int &rotation, double maxCorrectionRate) const;\n\n    /**\n      * @brief Returns the distance of the input bits to the specific id. 
If allRotations is true,\n      * the four possible bit rotations are considered\n      */\n    int getDistanceToId(InputArray bits, int id, bool allRotations = true) const;\n\n\n    /**\n     * @brief Draw a canonical marker image\n     */\n    void drawMarker(int id, int sidePixels, OutputArray _img, int borderBits = 1) const;\n\n\n    /**\n      * @brief Transform matrix of bits to list of bytes in the 4 rotations\n      */\n    static Mat getByteListFromBits(const Mat &bits);\n\n\n    /**\n      * @brief Transform list of bytes to matrix of bits\n      */\n    static Mat getBitsFromByteList(const Mat &byteList, int markerSize);\n};\n\n\n\n\n/**\n * @brief Predefined marker dictionaries/sets\n * Each dictionary indicates the number of bits and the number of markers contained\n * - DICT_ARUCO: standard ArUco Library Markers. 1024 markers, 5x5 bits, 0 minimum distance\n */\nenum PREDEFINED_DICTIONARY_NAME {\n    DICT_4X4_50 = 0,\n    DICT_4X4_100,\n    DICT_4X4_250,\n    DICT_4X4_1000,\n    DICT_5X5_50,\n    DICT_5X5_100,\n    DICT_5X5_250,\n    DICT_5X5_1000,\n    DICT_6X6_50,\n    DICT_6X6_100,\n    DICT_6X6_250,\n    DICT_6X6_1000,\n    DICT_7X7_50,\n    DICT_7X7_100,\n    DICT_7X7_250,\n    DICT_7X7_1000,\n    DICT_ARUCO_ORIGINAL\n};\n\n\n/**\n  * @brief Returns one of the predefined dictionaries defined in PREDEFINED_DICTIONARY_NAME\n  */\nCV_EXPORTS const Dictionary &getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME name);\n\n\n/**\n  * @brief Generates a new customizable marker dictionary\n  *\n  * @param nMarkers number of markers in the dictionary\n  * @param markerSize number of bits per dimension of each marker\n  * @param baseDictionary Include the markers in this dictionary at the beginning (optional)\n  *\n  * This function creates a new dictionary composed of nMarkers markers, each one composed\n  * of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly\n  * included and the rest are generated based on them. If the size of baseDictionary is higher\n  * than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.\n  */\nCV_EXPORTS Dictionary generateCustomDictionary(int nMarkers, int markerSize,\n                                               const Dictionary &baseDictionary = Dictionary());\n\n\n\n//! @}\n}\n}\n\n#endif\n"
  },
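Everything needed to generate printable markers is declared in dictionary.hpp above. A minimal sketch, assuming the header is reachable as <opencv2/aruco/dictionary.hpp>; the marker id (23) and side length (200 px) are arbitrary illustrative choices:

```cpp
// Sketch: fetch a predefined dictionary and render one marker in canonical form,
// using only getPredefinedDictionary() and Dictionary::drawMarker() declared above.
#include <opencv2/aruco/dictionary.hpp>
#include <opencv2/imgcodecs.hpp>

int main() {
    const cv::aruco::Dictionary &dict =
        cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::Mat marker;
    dict.drawMarker(23, 200, marker);  // id 23, 200x200 px, default 1-bit border
    cv::imwrite("marker23.png", marker);
    return 0;
}
```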
  {
    "path": "src/3rdparty/opencv/include/opencv2/aruco.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_ARUCO_HPP__\n#define __OPENCV_ARUCO_HPP__\n\n#include <opencv2/core.hpp>\n#include <vector>\n#include \"opencv2/aruco/dictionary.hpp\"\n\n/**\n * @defgroup aruco ArUco Marker Detection\n * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers)\n * These markers are useful for easy, fast and robust camera pose estimation.ç\n *\n * The main functionalities are:\n * - Detection of markers in a image\n * - Pose estimation from a single marker or from a board/set of markers\n * - Detection of ChArUco board for high subpixel accuracy\n * - Camera calibration from both, ArUco boards and ChArUco boards.\n * - Detection of ChArUco diamond markers\n * The samples directory includes easy examples of how to use the module.\n *\n * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado.\n *\n * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.\n * \"Automatic generation and detection of highly reliable fiducial markers under occlusion\".\n * Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005\n *\n * @sa http://www.uco.es/investiga/grupos/ava/node/26\n *\n * This module has been originally developed by Sergio Garrido-Jurado as a project\n * for Google Summer of Code 2015 (GSoC 15).\n *\n *\n*/\n\nnamespace cv {\nnamespace aruco {\n\n//! @addtogroup aruco\n//! 
@{\n\n\n\n/**\n * @brief Parameters for the detectMarker process:\n * - adaptiveThreshWinSizeMin: minimum window size for adaptive thresholding before finding\n *   contours (default 3).\n * - adaptiveThreshWinSizeMax: maximum window size for adaptive thresholding before finding\n *   contours (default 23).\n * - adaptiveThreshWinSizeStep: increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax\n *   during the thresholding (default 10).\n * - adaptiveThreshConstant: constant for adaptive thresholding before finding contours (default 7)\n * - minMarkerPerimeterRate: determines the minimum perimeter for a marker contour to be detected.\n *   This is defined as a rate with respect to the maximum dimension of the input image (default 0.03).\n * - maxMarkerPerimeterRate: determines the maximum perimeter for a marker contour to be detected.\n *   This is defined as a rate with respect to the maximum dimension of the input image (default 4.0).\n * - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to\n *   determine which contours are squares.\n * - minCornerDistanceRate: minimum distance between corners for detected markers, relative to the\n *   marker perimeter (default 0.05)\n * - minDistanceToBorder: minimum distance of any corner to the image border for detected markers\n *   (in pixels) (default 3)\n * - minMarkerDistanceRate: minimum mean distance between two marker corners to be considered\n *   similar, so that the smaller one is removed. The rate is relative to the smaller perimeter\n *   of the two markers (default 0.05).\n * - doCornerRefinement: do subpixel refinement or not\n * - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5).\n * - cornerRefinementMaxIterations: maximum number of iterations for the stop criteria of the corner\n *   refinement process (default 30).\n * - cornerRefinementMinAccuracy: minimum error for the stop criteria of the corner refinement\n *   process (default: 0.1)\n * - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1).\n * - perspectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker\n *   when removing the perspective (default 8).\n * - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not\n *   considered for the determination of the cell bit. Represents the rate with respect to the total\n *   size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13)\n * - maxErroneousBitsInBorderRate: maximum number of accepted erroneous bits in the border (i.e.\n *   number of allowed white bits in the border). Represented as a rate with respect to the total\n *   number of bits per marker (default 0.35).\n * - minOtsuStdDev: minimum standard deviation of pixel values during the decoding step to\n *   apply Otsu thresholding (otherwise, all the bits are set to 0 or 1 depending on whether the mean\n *   is higher than 128 or not) (default 5.0)\n * - errorCorrectionRate: error correction rate with respect to the maximum error correction capability\n *   for each dictionary. 
(default 0.6).\n */\nstruct CV_EXPORTS DetectorParameters {\n\n    DetectorParameters();\n\n    int adaptiveThreshWinSizeMin;\n    int adaptiveThreshWinSizeMax;\n    int adaptiveThreshWinSizeStep;\n    double adaptiveThreshConstant;\n    double minMarkerPerimeterRate;\n    double maxMarkerPerimeterRate;\n    double polygonalApproxAccuracyRate;\n    double minCornerDistanceRate;\n    int minDistanceToBorder;\n    double minMarkerDistanceRate;\n    bool doCornerRefinement;\n    int cornerRefinementWinSize;\n    int cornerRefinementMaxIterations;\n    double cornerRefinementMinAccuracy;\n    int markerBorderBits;\n    int perspectiveRemovePixelPerCell;\n    double perspectiveRemoveIgnoredMarginPerCell;\n    double maxErroneousBitsInBorderRate;\n    double minOtsuStdDev;\n    double errorCorrectionRate;\n};\n\n\n\n/**\n * @brief Basic marker detection\n *\n * @param image input image\n * @param dictionary indicates the type of markers that will be searched\n * @param corners vector of detected marker corners. For each marker, its four corners\n * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers,\n * the dimensions of this array are Nx4. The order of the corners is clockwise.\n * @param ids vector of identifiers of the detected markers. The identifier is of type int\n * (e.g. std::vector<int>). For N detected markers, the size of ids is also N.\n * The identifiers have the same order as the markers in the imgPoints array.\n * @param parameters marker detection parameters\n * @param rejectedImgPoints contains the imgPoints of those squares whose inner code does not have a\n * valid codification. Useful for debugging purposes.\n *\n * Performs marker detection in the input image. Only markers included in the specific dictionary\n * are searched. For each detected marker, it returns the 2D position of its corners in the image\n * and its corresponding identifier.\n * Note that this function does not perform pose estimation.\n * @sa estimatePoseSingleMarkers, estimatePoseBoard\n *\n */\nCV_EXPORTS void detectMarkers(InputArray image, Dictionary dictionary, OutputArrayOfArrays corners,\n                              OutputArray ids, DetectorParameters parameters = DetectorParameters(),\n                              OutputArrayOfArrays rejectedImgPoints = noArray());\n\n\n\n/**\n * @brief Pose estimation for single markers\n *\n * @param corners vector of already detected marker corners. For each marker, its four corners\n * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers,\n * the dimensions of this array should be Nx4. The order of the corners should be clockwise.\n * @sa detectMarkers\n * @param markerLength the length of the markers' side. The returned translation vectors will\n * be in the same unit. Normally, the unit is meters.\n * @param cameraMatrix input 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$\n * @param distCoeffs vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Vec3d>).\n * Each element in rvecs corresponds to the specific marker in imgPoints.\n * @param tvecs array of output translation vectors (e.g. 
std::vector<cv::Vec3d>).\n * Each element in tvecs corresponds to the specific marker in imgPoints.\n *\n * This function receives the detected markers and returns their pose estimation with respect to\n * the camera individually. So for each marker, one rotation and translation vector is returned.\n * The returned transformation is the one that transforms points from each marker coordinate system\n * to the camera coordinate system.\n * The marker coordinate system is centered on the middle of the marker, with the Z axis\n * perpendicular to the marker plane.\n * The coordinates of the four corners of the marker in its own coordinate system are:\n * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),\n * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)\n */\nCV_EXPORTS void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength,\n                                          InputArray cameraMatrix, InputArray distCoeffs,\n                                          OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs);\n\n\n\n/**\n * @brief Board of markers\n *\n * A board is a set of markers in the 3D space with a common coordinate system.\n * The most common form of a board of markers is a planar (2D) board; however, any 3D layout can be used.\n * A Board object is composed of:\n * - The object points of the marker corners, i.e. their coordinates with respect to the board system.\n * - The dictionary which indicates the type of markers of the board\n * - The identifiers of all the markers in the board.\n */\nclass CV_EXPORTS Board {\n\n    public:\n    // array of object points of all the marker corners in the board\n    // each marker includes its 4 corners, i.e. for M markers, the size is Mx4\n    std::vector< std::vector< Point3f > > objPoints;\n\n    // the dictionary of markers employed for this board\n    Dictionary dictionary;\n\n    // vector of the identifiers of the markers in the board (same size as objPoints)\n    // The identifiers refer to the board dictionary\n    std::vector< int > ids;\n};\n\n\n\n/**\n * @brief Planar board with grid arrangement of markers\n * The most common type of board. All markers are placed in the same plane in a grid arrangement.\n * The board can be drawn using the drawPlanarBoard() function (@sa drawPlanarBoard)\n */\nclass CV_EXPORTS GridBoard : public Board {\n\n    public:\n    /**\n     * @brief Draw a GridBoard\n     *\n     * @param outSize size of the output image in pixels.\n     * @param img output image with the board. 
The size of this image will be outSize\n     * and the board will be in the center, keeping the board proportions.\n     * @param marginSize minimum margins (in pixels) of the board in the output image\n     * @param borderBits width of the marker borders.\n     *\n     * This function returns the image of the GridBoard, ready to be printed.\n     */\n    void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);\n\n\n    /**\n     * @brief Create a GridBoard object\n     *\n     * @param markersX number of markers in X direction\n     * @param markersY number of markers in Y direction\n     * @param markerLength marker side length (normally in meters)\n     * @param markerSeparation separation between two markers (same unit as markerLength)\n     * @param dictionary dictionary of markers indicating the type of markers.\n     * The first markersX*markersY markers in the dictionary are used.\n     * @return the output GridBoard object\n     *\n     * This function creates a GridBoard object given the number of markers in each direction and\n     * the marker size and marker separation.\n     */\n    static GridBoard create(int markersX, int markersY, float markerLength, float markerSeparation,\n                            Dictionary dictionary);\n\n    /**\n      *\n      */\n    Size getGridSize() const { return Size(_markersX, _markersY); }\n\n    /**\n      *\n      */\n    float getMarkerLength() const { return _markerLength; }\n\n    /**\n      *\n      */\n    float getMarkerSeparation() const { return _markerSeparation; }\n\n\n    private:\n    // number of markers in X and Y directions\n    int _markersX, _markersY;\n\n    // marker side length (normally in meters)\n    float _markerLength;\n\n    // separation between markers in the grid\n    float _markerSeparation;\n};\n\n\n\n/**\n * @brief Pose estimation for a board of markers\n *\n * @param corners vector of already detected marker corners. For each marker, its four corners\n * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the\n * dimensions of this array should be Nx4. The order of the corners should be clockwise.\n * @param ids list of identifiers for each marker in corners\n * @param board layout of markers in the board. The layout is composed of the marker identifiers\n * and the positions of each marker corner in the board reference system.\n * @param cameraMatrix input 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$\n * @param distCoeffs vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board\n * (@sa Rodrigues).\n * @param tvec Output vector (e.g. 
cv::Mat) corresponding to the translation vector of the board.\n *\n * This function receives the detected markers and returns the pose of a marker board composed\n * of those markers.\n * A board of markers has a single world coordinate system which is defined by the board layout.\n * The returned transformation is the one that transforms points from the board coordinate system\n * to the camera coordinate system.\n * Input markers that are not included in the board layout are ignored.\n * The function returns the number of markers from the input employed for the board pose estimation.\n * Note that returning a 0 means the pose has not been estimated.\n */\nCV_EXPORTS int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Board &board,\n                                 InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec,\n                                 OutputArray tvec);\n\n\n\n\n/**\n * @brief Re-find markers that were not detected, based on the already detected markers and the board layout\n *\n * @param image input image\n * @param board layout of markers in the board.\n * @param detectedCorners vector of already detected marker corners.\n * @param detectedIds vector of already detected marker identifiers.\n * @param rejectedCorners vector of rejected candidates during the marker detection process.\n * @param cameraMatrix optional input 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$\n * @param distCoeffs optional vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n * @param minRepDistance minimum distance between the corners of the rejected candidate and the\n * reprojected marker in order to consider it as a correspondence.\n * @param errorCorrectionRate rate of allowed erroneous bits with respect to the error correction\n * capability of the used dictionary. -1 ignores the error correction step.\n * @param checkAllOrders Consider the four possible corner orders in the rejectedCorners array.\n * If set to false, only the provided corner order is considered (default true).\n * @param recoveredIdxs Optional array to return the indices of the recovered candidates in the\n * original rejectedCorners array.\n * @param parameters marker detection parameters\n *\n * This function tries to find markers that were not detected in the basic detectMarkers function.\n * First, based on the currently detected markers and the board layout, the function interpolates\n * the position of the missing markers. Then it tries to find correspondence between the reprojected\n * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate\n * parameters.\n * If camera parameters and distortion coefficients are provided, missing markers are reprojected\n * using the projectPoints function. 
If not, missing marker projections are interpolated using global\n * homography, and all the marker corners in the board must have the same Z coordinate.\n */\nCV_EXPORTS void refineDetectedMarkers(\n    InputArray image, const Board &board, InputOutputArrayOfArrays detectedCorners,\n    InputOutputArray detectedIds, InputOutputArray rejectedCorners,\n    InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),\n    float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true,\n    OutputArray recoveredIdxs = noArray(), DetectorParameters parameters = DetectorParameters());\n\n\n\n/**\n * @brief Draw detected markers in image\n *\n * @param image input/output image. It must have 1 or 3 channels. The number of channels is not\n * altered.\n * @param corners positions of marker corners on input image.\n * (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the dimensions of\n * this array should be Nx4. The order of the corners should be clockwise.\n * @param ids vector of identifiers for markers in corners.\n * Optional, if not provided, ids are not painted.\n * @param borderColor color of marker borders. Rest of colors (text color and first corner color)\n * are calculated based on this one to improve visualization.\n *\n * Given an array of detected marker corners and their corresponding ids, this function draws\n * the markers in the image. The marker borders are painted, and the marker identifiers if provided.\n * Useful for debugging purposes.\n */\nCV_EXPORTS void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners,\n                                    InputArray ids = noArray(),\n                                    Scalar borderColor = Scalar(0, 255, 0));\n\n\n\n/**\n * @brief Draw coordinate system axis from pose estimation\n *\n * @param image input/output image. It must have 1 or 3 channels. The number of channels is not\n * altered.\n * @param cameraMatrix input 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$\n * @param distCoeffs vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n * @param rvec rotation vector of the coordinate system that will be drawn. (@sa Rodrigues).\n * @param tvec translation vector of the coordinate system that will be drawn.\n * @param length length of the painted axis in the same unit as tvec (usually in meters)\n *\n * Given the pose estimation of a marker or board, this function draws the axis of the world\n * coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.\n */\nCV_EXPORTS void drawAxis(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,\n                         InputArray rvec, InputArray tvec, float length);\n\n\n\n/**\n * @brief Draw a canonical marker image\n *\n * @param dictionary dictionary of markers indicating the type of markers\n * @param id identifier of the marker that will be returned. It has to be a valid id\n * in the specified dictionary.\n * @param sidePixels size of the image in pixels\n * @param img output image with the marker\n * @param borderBits width of the marker border.\n *\n * This function returns a marker image in its canonical form (i.e. 
ready to be printed)\n */\nCV_EXPORTS void drawMarker(Dictionary dictionary, int id, int sidePixels, OutputArray img,\n                           int borderBits = 1);\n\n\n\n/**\n * @brief Draw a planar board\n *\n * @param board layout of the board that will be drawn. The board should be planar,\n * z coordinate is ignored\n * @param outSize size of the output image in pixels.\n * @param img output image with the board. The size of this image will be outSize\n * and the board will be in the center, keeping the board proportions.\n * @param marginSize minimum margins (in pixels) of the board in the output image\n * @param borderBits width of the marker borders.\n *\n * This function returns the image of a planar board, ready to be printed. It assumes\n * the Board layout specified is planar by ignoring the z coordinates of the object points.\n */\nCV_EXPORTS void drawPlanarBoard(const Board &board, Size outSize, OutputArray img,\n                                int marginSize = 0, int borderBits = 1);\n\n\n\n/**\n * @brief Calibrate a camera using aruco markers\n *\n * @param corners vector of detected marker corners in all frames.\n * The corners should have the same format returned by detectMarkers (@sa detectMarkers).\n * @param ids list of identifiers for each marker in corners\n * @param counter number of markers in each frame so that corners and ids can be split\n * @param board Marker Board layout\n * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.\n * @param cameraMatrix Output 3x3 floating-point camera matrix\n * \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ . If CV\\_CALIB\\_USE\\_INTRINSIC\\_GUESS\n * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be\n * initialized before calling the function.\n * @param distCoeffs Output vector of distortion coefficients\n * \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\\f$ of 4, 5, 8 or 12 elements\n * @param rvecs Output vector of rotation vectors (see Rodrigues) estimated for each board view\n * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding\n * k-th translation vector (see the next output parameter description) brings the board pattern\n * from the model coordinate space (in which object points are specified) to the world coordinate\n * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).\n * @param tvecs Output vector of translation vectors estimated for each pattern view.\n * @param flags Different flags for the calibration process (@sa calibrateCamera)\n * @param criteria Termination criteria for the iterative optimization algorithm.\n *\n * This function calibrates a camera using an Aruco Board. The function receives a list of\n * detected markers from several views of the Board. The process is similar to the chessboard\n * calibration in calibrateCamera(). The function returns the final re-projection error.\n */\nCV_EXPORTS double calibrateCameraAruco(\n    InputArrayOfArrays corners, InputArray ids, InputArray counter, const Board &board,\n    Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,\n    OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,\n    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));\n\n\n\n//! @}\n}\n}\n\n#endif\n"
  },
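The aruco.hpp pipeline composes naturally: detectMarkers() finds corners and ids, estimatePoseSingleMarkers() turns them into per-marker poses, and drawDetectedMarkers()/drawAxis() visualize the result. A minimal sketch using only functions declared in this header; the input file name, the intrinsics (fx = fy = 800, principal point at (320, 240)), and the 5 cm marker side are placeholder assumptions, since real code would load calibration data from a file:

```cpp
// Sketch: detect markers in one frame, estimate per-marker pose, draw results.
#include <opencv2/aruco.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main() {
    cv::Mat image = cv::imread("frame.png");  // placeholder input frame
    cv::aruco::Dictionary dictionary =
        cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);

    std::vector<std::vector<cv::Point2f> > corners;  // Nx4 corner positions
    std::vector<int> ids;                            // N marker identifiers
    cv::aruco::detectMarkers(image, dictionary, corners, ids);

    if (!ids.empty()) {
        cv::aruco::drawDetectedMarkers(image, corners, ids);

        // placeholder intrinsics; real values come from camera calibration
        cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) <<
                                800, 0, 320, 0, 800, 240, 0, 0, 1);
        cv::Mat distCoeffs = cv::Mat::zeros(5, 1, CV_64F);

        std::vector<cv::Vec3d> rvecs, tvecs;
        cv::aruco::estimatePoseSingleMarkers(corners, 0.05f /* 5 cm side */,
                                             cameraMatrix, distCoeffs,
                                             rvecs, tvecs);
        for (size_t i = 0; i < ids.size(); i++)
            cv::aruco::drawAxis(image, cameraMatrix, distCoeffs,
                                rvecs[i], tvecs[i], 0.05f);
    }
    cv::imwrite("frame_out.png", image);
    return 0;
}
```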
  {
    "path": "src/3rdparty/opencv/include/opencv2/bgsegm.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_BGSEGM_HPP__\n#define __OPENCV_BGSEGM_HPP__\n\n#include \"opencv2/video.hpp\"\n\n#ifdef __cplusplus\n\n/** @defgroup bgsegm Improved Background-Foreground Segmentation Methods\n*/\n\nnamespace cv\n{\nnamespace bgsegm\n{\n\n//! @addtogroup bgsegm\n//! @{\n\n/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.\n\nThe class implements the algorithm described in @cite KB2001 .\n */\nclass CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor\n{\npublic:\n    CV_WRAP virtual int getHistory() const = 0;\n    CV_WRAP virtual void setHistory(int nframes) = 0;\n\n    CV_WRAP virtual int getNMixtures() const = 0;\n    CV_WRAP virtual void setNMixtures(int nmix) = 0;\n\n    CV_WRAP virtual double getBackgroundRatio() const = 0;\n    CV_WRAP virtual void setBackgroundRatio(double backgroundRatio) = 0;\n\n    CV_WRAP virtual double getNoiseSigma() const = 0;\n    CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;\n};\n\n/** @brief Creates mixture-of-gaussian background subtractor\n\n@param history Length of the history.\n@param nmixtures Number of Gaussian mixtures.\n@param backgroundRatio Background ratio.\n@param noiseSigma Noise strength (standard deviation of the brightness or each color channel). 
0\nmeans some automatic value.\n */\nCV_EXPORTS_W Ptr<BackgroundSubtractorMOG>\n    createBackgroundSubtractorMOG(int history=200, int nmixtures=5,\n                                  double backgroundRatio=0.7, double noiseSigma=0);\n\n\n/** @brief Background Subtractor module based on the algorithm given in @cite Gold2012 .\n\n Takes a series of images and returns a sequence of mask (8UC1)\n images of the same size, where 255 indicates Foreground and 0 represents Background.\n This class implements an algorithm described in \"Visual Tracking of Human Visitors under\n Variable-Lighting Conditions for a Responsive Audio Art Installation,\" A. Godbehere,\n A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.\n */\nclass CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor\n{\npublic:\n    /** @brief Returns total number of distinct colors to maintain in histogram.\n    */\n    CV_WRAP virtual int getMaxFeatures() const = 0;\n    /** @brief Sets total number of distinct colors to maintain in histogram.\n    */\n    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;\n\n    /** @brief Returns the learning rate of the algorithm.\n\n    It lies between 0.0 and 1.0. It determines how quickly features are \"forgotten\" from\n    histograms.\n     */\n    CV_WRAP virtual double getDefaultLearningRate() const = 0;\n    /** @brief Sets the learning rate of the algorithm.\n    */\n    CV_WRAP virtual void setDefaultLearningRate(double lr) = 0;\n\n    /** @brief Returns the number of frames used to initialize background model.\n    */\n    CV_WRAP virtual int getNumFrames() const = 0;\n    /** @brief Sets the number of frames used to initialize background model.\n    */\n    CV_WRAP virtual void setNumFrames(int nframes) = 0;\n\n    /** @brief Returns the parameter used for quantization of color-space.\n\n    It is the number of discrete levels in each channel to be used in histograms.\n     */\n    CV_WRAP virtual int getQuantizationLevels() const = 0;\n    /** @brief Sets the parameter used for quantization of color-space\n    */\n    CV_WRAP virtual void setQuantizationLevels(int nlevels) = 0;\n\n    /** @brief Returns the prior probability that each individual pixel is a background pixel.\n    */\n    CV_WRAP virtual double getBackgroundPrior() const = 0;\n    /** @brief Sets the prior probability that each individual pixel is a background pixel.\n    */\n    CV_WRAP virtual void setBackgroundPrior(double bgprior) = 0;\n\n    /** @brief Returns the kernel radius used for morphological operations\n    */\n    CV_WRAP virtual int getSmoothingRadius() const = 0;\n    /** @brief Sets the kernel radius used for morphological operations\n    */\n    CV_WRAP virtual void setSmoothingRadius(int radius) = 0;\n\n    /** @brief Returns the value of decision threshold.\n\n    Decision value is the value above which pixel is determined to be FG.\n     */\n    CV_WRAP virtual double getDecisionThreshold() const = 0;\n    /** @brief Sets the value of decision threshold.\n    */\n    CV_WRAP virtual void setDecisionThreshold(double thresh) = 0;\n\n    /** @brief Returns the status of background model update\n    */\n    CV_WRAP virtual bool getUpdateBackgroundModel() const = 0;\n    /** @brief Sets the status of background model update\n    */\n    CV_WRAP virtual void setUpdateBackgroundModel(bool update) = 0;\n\n    /** @brief Returns the minimum value taken on by pixels in image sequence. 
Usually 0.\n    */\n    CV_WRAP virtual double getMinVal() const = 0;\n    /** @brief Sets the minimum value taken on by pixels in image sequence.\n    */\n    CV_WRAP virtual void setMinVal(double val) = 0;\n\n    /** @brief Returns the maximum value taken on by pixels in image sequence. e.g. 1.0 or 255.\n    */\n    CV_WRAP virtual double getMaxVal() const = 0;\n    /** @brief Sets the maximum value taken on by pixels in image sequence.\n    */\n    CV_WRAP virtual void setMaxVal(double val) = 0;\n};\n\n/** @brief Creates a GMG Background Subtractor\n\n@param initializationFrames number of frames used to initialize the background models.\n@param decisionThreshold Threshold value, above which it is marked foreground, else background.\n */\nCV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120,\n                                                                        double decisionThreshold=0.8);                                  \n\n//! @}\n\n}\n}\n\n#endif\n#endif\n"
  },
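The MOG subtractor declared above is driven through the cv::BackgroundSubtractor interface it inherits: apply() fills an 8UC1 mask where 255 marks foreground. A minimal sketch; the video file name is a placeholder and the subtractor is built with its documented defaults:

```cpp
// Sketch: run the MOG background subtractor declared above over a video stream.
#include <opencv2/bgsegm.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

int main() {
    cv::VideoCapture cap("video.avi");  // placeholder input
    cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG> mog =
        cv::bgsegm::createBackgroundSubtractorMOG();  // history=200, 5 mixtures
    cv::Mat frame, fgMask;
    while (cap.read(frame)) {
        mog->apply(frame, fgMask);          // 8UC1 mask, 255 = foreground
        cv::imshow("foreground", fgMask);
        if (cv::waitKey(30) == 27) break;   // ESC quits
    }
    return 0;
}
```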
  {
    "path": "src/3rdparty/opencv/include/opencv2/bioinspired/bioinspired.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/bioinspired.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/bioinspired/retina.hpp",
    "content": "/*#******************************************************************************\n ** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n **\n ** By downloading, copying, installing or using the software you agree to this license.\n ** If you do not agree to this license, do not download, install,\n ** copy or use the software.\n **\n **\n ** bioinspired : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab.\n ** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping.\n **\n ** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications)\n **\n **  Creation - enhancement process 2007-2015\n **      Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France\n **\n ** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).\n ** Refer to the following research paper for more information:\n ** Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011\n ** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book:\n ** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.\n **\n ** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author :\n ** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:\n ** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). \"Efficient demosaicing through recursive filtering\", IEEE International Conference on Image Processing ICIP 2007\n ** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. 
A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.\n ** ====> more informations in the above cited Jeanny Heraults's book.\n **\n **                          License Agreement\n **               For Open Source Computer Vision Library\n **\n ** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n ** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.\n **\n **               For Human Visual System tools (bioinspired)\n ** Copyright (C) 2007-2015, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.\n **\n ** Third party copyrights are property of their respective owners.\n **\n ** Redistribution and use in source and binary forms, with or without modification,\n ** are permitted provided that the following conditions are met:\n **\n ** * Redistributions of source code must retain the above copyright notice,\n **    this list of conditions and the following disclaimer.\n **\n ** * Redistributions in binary form must reproduce the above copyright notice,\n **    this list of conditions and the following disclaimer in the documentation\n **    and/or other materials provided with the distribution.\n **\n ** * The name of the copyright holders may not be used to endorse or promote products\n **    derived from this software without specific prior written permission.\n **\n ** This software is provided by the copyright holders and contributors \"as is\" and\n ** any express or implied warranties, including, but not limited to, the implied\n ** warranties of merchantability and fitness for a particular purpose are disclaimed.\n ** In no event shall the Intel Corporation or contributors be liable for any direct,\n ** indirect, incidental, special, exemplary, or consequential damages\n ** (including, but not limited to, procurement of substitute goods or services;\n ** loss of use, data, or profits; or business interruption) however caused\n ** and on any theory of liability, whether in contract, strict liability,\n ** or tort (including negligence or otherwise) arising in any way out of\n ** the use of this software, even if advised of the possibility of such damage.\n *******************************************************************************/\n\n#ifndef __OPENCV_BIOINSPIRED_RETINA_HPP__\n#define __OPENCV_BIOINSPIRED_RETINA_HPP__\n\n/**\n@file\n@date Jul 19, 2011\n@author Alexandre Benoit\n*/\n\n#include \"opencv2/core.hpp\" // for all OpenCV core functionalities access, including cv::Exception support\n\n\nnamespace cv{\nnamespace bioinspired{\n\n//! @addtogroup bioinspired\n//! @{\n\nenum {\n    RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice\n    RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...\n    RETINA_COLOR_BAYER//!< standard bayer sampling\n};\n\n\n/** @brief retina model parameters structure\n\n    For better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel\n\n    Here is the default configuration file of the retina module. 
It gives results such as the first\n    retina output shown on the top of this page.\n\n    @code{xml}\n    <?xml version=\"1.0\"?>\n    <opencv_storage>\n    <OPLandIPLparvo>\n        <colorMode>1</colorMode>\n        <normaliseOutput>1</normaliseOutput>\n        <photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>\n        <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>\n        <photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>\n        <horizontalCellsGain>0.01</horizontalCellsGain>\n        <hcellsTemporalConstant>0.5</hcellsTemporalConstant>\n        <hcellsSpatialConstant>7.</hcellsSpatialConstant>\n        <ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity></OPLandIPLparvo>\n    <IPLmagno>\n        <normaliseOutput>1</normaliseOutput>\n        <parasolCells_beta>0.</parasolCells_beta>\n        <parasolCells_tau>0.</parasolCells_tau>\n        <parasolCells_k>7.</parasolCells_k>\n        <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>\n        <V0CompressionParameter>9.5e-01</V0CompressionParameter>\n        <localAdaptintegration_tau>0.</localAdaptintegration_tau>\n        <localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>\n    </opencv_storage>\n    @endcode\n\n    Here is the 'realistic\" setup used to obtain the second retina output shown on the top of this page.\n\n    @code{xml}\n    <?xml version=\"1.0\"?>\n    <opencv_storage>\n    <OPLandIPLparvo>\n      <colorMode>1</colorMode>\n      <normaliseOutput>1</normaliseOutput>\n      <photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>\n      <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>\n      <photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>\n      <horizontalCellsGain>0.3</horizontalCellsGain>\n      <hcellsTemporalConstant>0.5</hcellsTemporalConstant>\n      <hcellsSpatialConstant>7.</hcellsSpatialConstant>\n      <ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity></OPLandIPLparvo>\n    <IPLmagno>\n      <normaliseOutput>1</normaliseOutput>\n      <parasolCells_beta>0.</parasolCells_beta>\n      <parasolCells_tau>0.</parasolCells_tau>\n      <parasolCells_k>7.</parasolCells_k>\n      <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>\n      <V0CompressionParameter>9.5e-01</V0CompressionParameter>\n      <localAdaptintegration_tau>0.</localAdaptintegration_tau>\n      <localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>\n    </opencv_storage>\n    @endcode\n      */\n    struct RetinaParameters{ \n        //! 
Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters\n        struct OPLandIplParvoParameters{\n               OPLandIplParvoParameters():colorMode(true),\n                                 normaliseOutput(true),\n                                 photoreceptorsLocalAdaptationSensitivity(0.75f),\n                                 photoreceptorsTemporalConstant(0.9f),\n                                 photoreceptorsSpatialConstant(0.53f),\n                                 horizontalCellsGain(0.01f),\n                                 hcellsTemporalConstant(0.5f),\n                                 hcellsSpatialConstant(7.f),\n                                 ganglionCellsSensitivity(0.75f) { } // default setup\n               bool colorMode, normaliseOutput;\n               float photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity;\n        };\n        //! Inner Plexiform Layer Magnocellular channel (IplMagno)\n        struct IplMagnoParameters{\n            IplMagnoParameters():\n                          normaliseOutput(true),\n                          parasolCells_beta(0.f),\n                          parasolCells_tau(0.f),\n                          parasolCells_k(7.f),\n                          amacrinCellsTemporalCutFrequency(2.0f),\n                          V0CompressionParameter(0.95f),\n                          localAdaptintegration_tau(0.f),\n                          localAdaptintegration_k(7.f) { } // default setup\n            bool normaliseOutput;\n            float parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k;\n        };\n        OPLandIplParvoParameters OPLandIplParvo;\n        IplMagnoParameters IplMagno;\n    };\n\n\n\n/** @brief class which allows the Gipsa/Listic Labs model to be used with OpenCV.\n\nThis retina model allows spatio-temporal image processing (applied on still images, video sequences).\nAs a summary, these are the retina model properties:\n- It applies a spectral whithening (mid-frequency details enhancement)\n- high frequency spatio-temporal noise reduction\n- low frequency luminance to be reduced (luminance range compression)\n- local logarithmic luminance compression allows details to be enhanced in low light conditions\n\nUSE : this model can be used basically for spatio-temporal video effects but also for :\n     _using the getParvo method output matrix : texture analysiswith enhanced signal to noise ratio and enhanced details robust against input images luminance ranges\n     _using the getMagno method output matrix : motion analysis also with the previously cited properties\n\nfor more information, reer to the following papers :\nBenoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011\nVision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. 
WAPI (Tower ID): 113266891.\n\nThe retina filter includes the research contributions of PhD/research colleagues from which code has been redrawn by the author:\ntake a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:\nB. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). \"Efficient demosaicing through recursive filtering\", IEEE International Conference on Image Processing ICIP 2007\ntake a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette's PhD with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.\nMore information can be found in Jeanny Herault's book cited above.\n */\nclass CV_EXPORTS_W Retina : public Algorithm {\n\npublic:\n\n    \n    /** @brief Retrieve retina input buffer size\n    @return the retina input buffer size\n     */\n    CV_WRAP virtual Size getInputSize()=0;\n\n    /** @brief Retrieve retina output buffer size that can be different from the input if a spatial log\n    transformation is applied\n    @return the retina output buffer size\n     */\n    CV_WRAP virtual Size getOutputSize()=0;\n\n    /** @brief Try to open an XML retina parameters file to adjust current retina instance setup\n\n    - if the xml file does not exist, then default setup is applied\n    - warning, Exceptions are thrown if read XML file is not valid\n    @param retinaParameterFile the parameters filename\n    @param applyDefaultSetupOnFailure set to true if an error must be thrown on error\n    You can retrieve the current parameters structure using method Retina::getParameters and update\n    it before running method Retina::setup\n     */\n    CV_WRAP virtual void setup(String retinaParameterFile=\"\", const bool applyDefaultSetupOnFailure=true)=0;\n\n    /** @overload\n    @param fs the open FileStorage which contains retina parameters\n    @param applyDefaultSetupOnFailure set to true if an error must be thrown on error\n    */\n    virtual void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true)=0;\n\n    /** @overload\n    @param newParameters a parameters structure updated with the new target configuration.\n    */\n    virtual void setup(RetinaParameters newParameters)=0;\n\n    /**\n    @return the current parameters setup\n    */\n    virtual RetinaParameters getParameters()=0;\n\n    /** @brief Outputs a string showing the used parameters setup\n    @return a string which contains formatted parameters information\n     */\n    CV_WRAP virtual const String printSetup()=0;\n\n    /** @brief Write xml/yml formatted parameters information\n    @param fs the filename of the xml file that will be opened and written with formatted parameters\n    information\n     */\n    CV_WRAP virtual void write( String fs ) const=0;\n\n    /** @overload */\n    virtual void write( FileStorage& fs ) const=0;\n\n    /** @brief Setup the OPL and IPL parvo channels (see biological model)\n\n    OPL refers to the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering\n    which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance\n    (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the\n    Inner Plexiform layer of the retina and allows high contour sensitivity in foveal vision. 
See\n    reference papers for more informations.\n    for more informations, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011\n    @param colorMode specifies if (true) color is processed of not (false) to then processing gray\n    level image\n    @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)\n    @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity renage is 0-1\n    (more log compression effect when value increases)\n    @param photoreceptorsTemporalConstant the time constant of the first order low pass filter of\n    the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is\n    frames, typical value is 1 frame\n    @param photoreceptorsSpatialConstant the spatial constant of the first order low pass filter of\n    the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is\n    pixels, typical value is 1 pixel\n    @param horizontalCellsGain gain of the horizontal cells network, if 0, then the mean value of\n    the output is zero, if the parameter is near 1, then, the luminance is not filtered and is\n    still reachable at the output, typicall value is 0\n    @param HcellsTemporalConstant the time constant of the first order low pass filter of the\n    horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is\n    frames, typical value is 1 frame, as the photoreceptors\n    @param HcellsSpatialConstant the spatial constant of the first order low pass filter of the\n    horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels,\n    typical value is 5 pixel, this value is also used for local contrast computing when computing\n    the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular\n    channel model)\n    @param ganglionCellsSensitivity the compression strengh of the ganglion cells local adaptation\n    output, set a value between 0.6 and 1 for best results, a high value increases more the low\n    value sensitivity... and the output saturates faster, recommended value: 0.7\n     */\n    CV_WRAP virtual void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7f, const float photoreceptorsTemporalConstant=0.5f, const float photoreceptorsSpatialConstant=0.53f, const float horizontalCellsGain=0.f, const float HcellsTemporalConstant=1.f, const float HcellsSpatialConstant=7.f, const float ganglionCellsSensitivity=0.7f)=0;\n\n    /** @brief Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel\n\n    this channel processes signals output from OPL processing stage in peripheral vision, it allows\n    motion information enhancement. It is decorrelated from the details channel. 
See reference\n    papers for more details.\n\n    @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)\n    @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the\n    IPL level of the retina (for ganglion cells local adaptation), typical value is 0\n    @param parasolCells_tau the low pass filter time constant used for local contrast adaptation\n    at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical\n    value is 0 (immediate response)\n    @param parasolCells_k the low pass filter spatial constant used for local contrast adaptation\n    at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical\n    value is 5\n    @param amacrinCellsTemporalCutFrequency the time constant of the first order high pass filter of\n    the magnocellular pathway (motion information channel), unit is frames, typical value is 1.2\n    @param V0CompressionParameter the compression strength of the ganglion cells local adaptation\n    output, set a value between 0.6 and 1 for best results, a high value increases the sensitivity to\n    low values... and the output saturates faster, recommended value: 0.95\n    @param localAdaptintegration_tau specifies the temporal constant of the low pass filter\n    involved in the computation of the local \"motion mean\" for the local adaptation computation\n    @param localAdaptintegration_k specifies the spatial constant of the low pass filter involved\n    in the computation of the local \"motion mean\" for the local adaptation computation\n     */\n    CV_WRAP virtual void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0.f, const float parasolCells_tau=0.f, const float parasolCells_k=7.f, const float amacrinCellsTemporalCutFrequency=1.2f, const float V0CompressionParameter=0.95f, const float localAdaptintegration_tau=0.f, const float localAdaptintegration_k=7.f)=0;\n\n    /** @brief Method which applies the retina to an input image.\n\n    After run, the encapsulated retina module is ready to deliver its outputs using dedicated\n    accessors; see the getParvo and getMagno methods\n    @param inputImage the input Mat image to be processed, can be gray level or BGR coded in any\n    format (from 8-bit to 16-bit)\n     */\n    CV_WRAP virtual void run(InputArray inputImage)=0;\n\n    /** @brief Method which processes an image with the aim of correcting its luminance: correct\n    backlight problems, enhance details in shadows.\n\n    This method is designed to perform High Dynamic Range image tone mapping (compress \\>8bit/pixel\n    images to 8bit/pixel). This is a simplified version of the Retina Parvocellular model\n    (simplified version of the run/getParvo methods call) since it does not include the\n    spatio-temporal filter modelling the Outer Plexiform Layer of the retina that performs spectral\n    whitening, among other things. 
However, it works great for tone mapping, and in a faster way.\n\n    Check the demos and experiments section to see examples and the way to perform tone mapping\n    using the original retina model and this method.\n\n    @param inputImage the input image to process (should be coded in float format: CV_32F,\n    CV_32FC1, CV_32FC3, CV_32FC4, the 4th channel won't be considered).\n    @param outputToneMappedImage the output 8bit/channel tone mapped image (CV_8U or CV_8UC3 format).\n     */\n    CV_WRAP virtual void applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)=0;\n\n    /** @brief Accessor of the details channel of the retina (models foveal vision).\n\n    Warning, the getParvoRAW methods return buffers that are not rescaled within range [0;255] while\n    the non-RAW method allows a normalized matrix to be retrieved.\n\n    @param retinaOutput_parvo the output buffer (reallocated if necessary), format can be:\n    -   a Mat, this output is rescaled for standard 8-bit image processing use in OpenCV\n    -   RAW methods actually return a 1D matrix (encoding is R1, R2, ... Rn, G1, G2, ..., Gn, B1,\n    B2, ...Bn), this output is the original retina filter model output, without any\n    quantization or rescaling.\n    @see getParvoRAW\n     */\n    CV_WRAP virtual void getParvo(OutputArray retinaOutput_parvo)=0;\n\n    /** @brief Accessor of the details channel of the retina (models foveal vision).\n    @see getParvo\n     */\n    CV_WRAP virtual void getParvoRAW(OutputArray retinaOutput_parvo)=0;\n\n    /** @brief Accessor of the motion channel of the retina (models peripheral vision).\n\n    Warning, the getMagnoRAW methods return buffers that are not rescaled within range [0;255] while\n    the non-RAW method allows a normalized matrix to be retrieved.\n    @param retinaOutput_magno the output buffer (reallocated if necessary), format can be:\n    -   a Mat, this output is rescaled for standard 8-bit image processing use in OpenCV\n    -   RAW methods actually return a 1D matrix (encoding is M1, M2,... 
Mn), this output is the\n    original retina filter model output, without any quantization or rescaling.\n    @see getMagnoRAW\n     */\n    CV_WRAP virtual void getMagno(OutputArray retinaOutput_magno)=0;\n\n    /** @brief Accessor of the motion channel of the retina (models peripheral vision).\n    @see getMagno\n     */\n    CV_WRAP virtual void getMagnoRAW(OutputArray retinaOutput_magno)=0;\n\n    /** @overload */\n    CV_WRAP virtual const Mat getMagnoRAW() const=0;\n    /** @overload */\n    CV_WRAP virtual const Mat getParvoRAW() const=0;\n\n    /** @brief Activate color saturation as the final step of the color demultiplexing process -\\> this\n    saturation is a sigmoid function applied to each channel of the demultiplexed image.\n    @param saturateColors boolean that activates color saturation (if true) or deactivates it (if false)\n    @param colorSaturationValue the saturation factor: a simple factor applied to the chrominance\n    buffers\n     */\n    CV_WRAP virtual void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0f)=0;\n\n    /** @brief Clears all retina buffers\n\n    (equivalent to opening the eyes after a long period of eye closure ;o). Watch out for the temporal\n    transition occurring just after this method call.\n     */\n    CV_WRAP virtual void clearBuffers()=0;\n\n    /** @brief Activate/deactivate the Magnocellular pathway processing (motion information extraction), by\n    default, it is activated\n    @param activate true if Magnocellular output should be activated, false if not... if activated,\n    the Magnocellular output can be retrieved using the **getMagno** methods\n     */\n    CV_WRAP virtual void activateMovingContoursProcessing(const bool activate)=0;\n\n    /** @brief Activate/deactivate the Parvocellular pathway processing (contours information extraction), by\n    default, it is activated\n    @param activate true if Parvocellular (contours information extraction) output should be\n    activated, false if not... if activated, the Parvocellular output can be retrieved using the\n    Retina::getParvo methods\n     */\n    CV_WRAP virtual void activateContoursProcessing(const bool activate)=0;\n};\n\n//! @relates bioinspired::Retina\n//! 
@{\n\n/** @overload */\nCV_EXPORTS_W Ptr<Retina> createRetina(Size inputSize);\n/** @brief Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance\n\n@param inputSize the input frame size\n@param colorMode the chosen processing mode: with or without color processing\n@param colorSamplingMethod specifies which kind of color sampling will be used:\n-   cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice\n-   cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3 GBRGBRGBR...\n-   cv::bioinspired::RETINA_COLOR_BAYER: standard Bayer sampling\n@param useRetinaLogSampling activates retina log sampling; if true, the 2 following parameters can\nbe used\n@param reductionFactor only useful if useRetinaLogSampling=true, specifies the reduction\nfactor of the output frame (as the center (fovea) is high resolution and the corners can be\ndownscaled, a reduction of the output is possible without precision loss)\n@param samplingStrenght only useful if useRetinaLogSampling=true, specifies the strength of\nthe log scale that is applied\n */\nCV_EXPORTS_W Ptr<Retina> createRetina(Size inputSize, const bool colorMode, int colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const float reductionFactor=1.0f, const float samplingStrenght=10.0f);\n\n#ifdef HAVE_OPENCV_OCL\nPtr<Retina> createRetina_OCL(Size inputSize);\nPtr<Retina> createRetina_OCL(Size inputSize, const bool colorMode, int colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const float reductionFactor=1.0f, const float samplingStrenght=10.0f);\n#endif\n\n//! @}\n\n//! @}\n\n}\n}\n#endif /* __OPENCV_BIOINSPIRED_RETINA_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/bioinspired/retinafasttonemapping.hpp",
    "content": "\n/*#******************************************************************************\n ** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n **\n ** By downloading, copying, installing or using the software you agree to this license.\n ** If you do not agree to this license, do not download, install,\n ** copy or use the software.\n **\n **\n ** bioinspired : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author & collaborators for computer vision applications since his thesis with Alice Caplier at Gipsa-Lab.\n **\n ** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications)\n **\n **  Creation - enhancement process 2007-2013\n **      Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France\n **\n ** These algorithms have been developed by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).\n ** Refer to the following research paper for more information:\n ** Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011\n ** This work has been carried out thanks to Jeanny Herault, whose research and great discussions are the basis of all this work; please take a look at his book:\n ** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.\n **\n **\n **\n **\n **\n ** This class is based on image processing tools of the author and is already used within the Retina class (this is the same code as the method retina::applyFastToneMapping, but in an independent class; it is light from a memory requirement point of view). It implements an adaptation of the efficient tone mapping algorithm proposed in David Alleysson, Sabine Susstrunk and Laurence Meylan's work; please cite:\n ** -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 
2807-2816\n **\n **\n **                          License Agreement\n **               For Open Source Computer Vision Library\n **\n ** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n ** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.\n **\n **               For Human Visual System tools (bioinspired)\n ** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.\n **\n ** Third party copyrights are property of their respective owners.\n **\n ** Redistribution and use in source and binary forms, with or without modification,\n ** are permitted provided that the following conditions are met:\n **\n ** * Redistributions of source code must retain the above copyright notice,\n **    this list of conditions and the following disclaimer.\n **\n ** * Redistributions in binary form must reproduce the above copyright notice,\n **    this list of conditions and the following disclaimer in the documentation\n **    and/or other materials provided with the distribution.\n **\n ** * The name of the copyright holders may not be used to endorse or promote products\n **    derived from this software without specific prior written permission.\n **\n ** This software is provided by the copyright holders and contributors \"as is\" and\n ** any express or implied warranties, including, but not limited to, the implied\n ** warranties of merchantability and fitness for a particular purpose are disclaimed.\n ** In no event shall the Intel Corporation or contributors be liable for any direct,\n ** indirect, incidental, special, exemplary, or consequential damages\n ** (including, but not limited to, procurement of substitute goods or services;\n ** loss of use, data, or profits; or business interruption) however caused\n ** and on any theory of liability, whether in contract, strict liability,\n ** or tort (including negligence or otherwise) arising in any way out of\n ** the use of this software, even if advised of the possibility of such damage.\n *******************************************************************************/\n\n#ifndef __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__\n#define __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__\n\n/**\n@file\n@date May 26, 2013\n@author Alexandre Benoit\n */\n\n#include \"opencv2/core.hpp\" // for all OpenCV core functionalities access, including cv::Exception support\n\nnamespace cv{\nnamespace bioinspired{\n\n//! @addtogroup bioinspired\n//! @{\n\n/** @brief  a wrapper class which allows the tone mapping algorithm of Meylan et al. (2007) to be used with OpenCV.\n\nThis algorithm is already implemented in the Retina class (retina::applyFastToneMapping) but using it does not require the whole retina model to be allocated. 
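A minimal usage sketch\nfollows (the HDR input name hdrFrame is an illustrative assumption):\n@code\ncv::Ptr<cv::bioinspired::RetinaFastToneMapping> toneMapper =\n    cv::bioinspired::createRetinaFastToneMapping(hdrFrame.size());\ncv::Mat toneMapped;\ntoneMapper->applyFastToneMapping(hdrFrame, toneMapped);\n@endcode\n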
Not allocating the full model allows a light memory use on low memory devices (smartphones, etc.).\nAs a summary, these are the model properties:\n- 2 stages of local luminance adaptation with a different local neighborhood for each.\n- the first stage models the retina photoreceptors local luminance adaptation\n- the second stage models the ganglion cells local information adaptation\n- compared to the initial publication, this class uses spatio-temporal low pass filters instead of spatial only filters.\n  this can help noise robustness and temporal stability for video sequence use cases.\n\nfor more information, refer to the following papers:\nMeylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816\nBenoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011\nregarding the spatio-temporal filter and the bigger retina model:\nVision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.\n*/\nclass CV_EXPORTS_W RetinaFastToneMapping : public Algorithm\n{\npublic:\n\n    /** @brief applies a luminance correction (initially High Dynamic Range (HDR) tone mapping)\n\n    using only the 2 local adaptation stages of the retina parvocellular channel: photoreceptors\n    level and ganglion cells level. Spatio-temporal filtering is applied but limited to temporal\n    smoothing and possibly high frequencies attenuation. This is a lighter method than the one\n    available using the regular retina::run method. It is then faster but it does not include\n    complete temporal filtering nor retina spectral whitening. Thus, it can have a more limited\n    effect on images with a very high dynamic range. This is an adaptation of the original still\n    image HDR tone mapping algorithm of David Alleysson, Sabine Susstrunk and Laurence Meylan's\n    work, please cite: -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local\n    Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of\n    America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816\n\n    @param inputImage the input image to process, RGB or gray levels\n    @param outputToneMappedImage the output tone mapped image\n    */\n    CV_WRAP virtual void applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)=0;\n\n    /** @brief updates tone mapping behaviors by adjusting the local luminance computation area\n\n    @param photoreceptorsNeighborhoodRadius the first stage local adaptation area\n    @param ganglioncellsNeighborhoodRadius the second stage local adaptation area\n    @param meanLuminanceModulatorK the factor applied to modulate the meanLuminance information\n    (default is 1, see reference paper)\n     */\n    CV_WRAP virtual void setup(const float photoreceptorsNeighborhoodRadius=3.f, const float ganglioncellsNeighborhoodRadius=1.f, const float meanLuminanceModulatorK=1.f)=0;\n};\n\n//! @relates bioinspired::RetinaFastToneMapping\nCV_EXPORTS_W Ptr<RetinaFastToneMapping> createRetinaFastToneMapping(Size inputSize);\n\n//! @}\n\n}\n}\n#endif /* __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/bioinspired/transientareassegmentationmodule.hpp",
    "content": "/*#******************************************************************************\n ** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n **\n ** By downloading, copying, installing or using the software you agree to this license.\n ** If you do not agree to this license, do not download, install,\n ** copy or use the software.\n **\n **\n ** bioinspired : interfaces allowing OpenCV users to integrate Human Vision System models.\n ** TransientAreasSegmentationModule Use: extract areas that present spatio-temporal changes.\n ** => It should be used on the cv::bioinspired::Retina::getMagnoRAW() output, which enhances spatio-temporal changes\n **\n ** Maintainers : Listic lab (code author current affiliation & applications)\n **\n **  Creation - enhancement process 2007-2015\n **      Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France\n **\n ** These algorithms have been developed by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).\n ** Refer to the following research paper for more information:\n ** Strat, S.T.; Benoit, A.; Lambert, P., \"Retina enhanced bag of words descriptors for video classification,\" Signal Processing Conference (EUSIPCO), 2014 Proceedings of the 22nd European, vol., no., pp.1307,1311, 1-5 Sept. 2014 (http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6952461&isnumber=6951911)\n ** Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011\n ** This work has been carried out thanks to Jeanny Herault, whose research and great discussions are the basis of all this work; please take a look at his book:\n ** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. 
WAPI (Tower ID): 113266891.\n **\n **\n **                          License Agreement\n **               For Open Source Computer Vision Library\n **\n ** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n ** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.\n **\n **               For Human Visual System tools (bioinspired)\n ** Copyright (C) 2007-2015, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.\n **\n ** Third party copyrights are property of their respective owners.\n **\n ** Redistribution and use in source and binary forms, with or without modification,\n ** are permitted provided that the following conditions are met:\n **\n ** * Redistributions of source code must retain the above copyright notice,\n **    this list of conditions and the following disclaimer.\n **\n ** * Redistributions in binary form must reproduce the above copyright notice,\n **    this list of conditions and the following disclaimer in the documentation\n **    and/or other materials provided with the distribution.\n **\n ** * The name of the copyright holders may not be used to endorse or promote products\n **    derived from this software without specific prior written permission.\n **\n ** This software is provided by the copyright holders and contributors \"as is\" and\n ** any express or implied warranties, including, but not limited to, the implied\n ** warranties of merchantability and fitness for a particular purpose are disclaimed.\n ** In no event shall the Intel Corporation or contributors be liable for any direct,\n ** indirect, incidental, special, exemplary, or consequential damages\n ** (including, but not limited to, procurement of substitute goods or services;\n ** loss of use, data, or profits; or business interruption) however caused\n ** and on any theory of liability, whether in contract, strict liability,\n ** or tort (including negligence or otherwise) arising in any way out of\n ** the use of this software, even if advised of the possibility of such damage.\n *******************************************************************************/\n\n#ifndef SEGMENTATIONMODULE_HPP_\n#define SEGMENTATIONMODULE_HPP_\n\n/**\n@file\n@date 2007-2013\n@author Alexandre BENOIT, benoit.alexandre.vision@gmail.com\n*/\n\n#include \"opencv2/core.hpp\" // for all OpenCV core functionalities access, including cv::Exception support\n\nnamespace cv\n{\nnamespace bioinspired\n{\n//! @addtogroup bioinspired\n//! @{\n\n/** @brief parameter structure that stores the transient events detector setup parameters\n*/\nstruct SegmentationParameters{ // CV_EXPORTS_W_MAP to export to python native dictionaries\n\t// default structure instance construction with default values\t\n\tSegmentationParameters():\n\t    thresholdON(100),\n\t    thresholdOFF(100),\n\t    localEnergy_temporalConstant(0.5),\n\t    localEnergy_spatialConstant(5),\n\t    neighborhoodEnergy_temporalConstant(1),\n\t    neighborhoodEnergy_spatialConstant(15),\n\t    contextEnergy_temporalConstant(1),\n\t    contextEnergy_spatialConstant(75){};\n\t// all properties list\n\tfloat thresholdON;\n\tfloat thresholdOFF;\n\t//! the time constant of the first order low pass filter, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 0.5 frame\n\tfloat localEnergy_temporalConstant;\n\t//! 
the spatial constant of the first order low pass filter, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 5 pixels\n\tfloat localEnergy_spatialConstant;\n\t//! local neighborhood energy filtering parameters: the aim is to get information about the energy neighborhood to perform a center surround energy analysis\n\tfloat neighborhoodEnergy_temporalConstant;\n\tfloat neighborhoodEnergy_spatialConstant;\n\t//! context neighborhood energy filtering parameters: the aim is to get information about the energy on a wide neighborhood area to filter out local effects\n\tfloat contextEnergy_temporalConstant;\n\tfloat contextEnergy_spatialConstant;\n};\n\n/** @brief class which provides a transient/moving areas segmentation module\n\nperforms a locally adapted segmentation using the retina magno input data. Based on Alexandre\nBENOIT's thesis: \"Le système visuel humain au secours de la vision par ordinateur\"\n\n3 spatio-temporal filters are used:\n- a first one which filters the noise and local variations of the input motion energy\n- a second (more powerful low pass spatial filter) which gives the neighborhood motion energy;\nthe segmentation consists in comparing these two outputs: if the local motion energy is higher\nthan the neighborhood motion energy, then the area is considered as moving and is segmented\n- a stronger third low pass filter helps the decision by providing smooth information about the\n\"motion context\" in a wider area\n */\n\nclass CV_EXPORTS_W TransientAreasSegmentationModule: public Algorithm\n{\npublic:\n\n\n    /** @brief returns the size of the managed input and output images\n    */\n    CV_WRAP virtual Size getSize()=0;\n\n    /** @brief try to open an XML segmentation parameters file to adjust current segmentation instance setup\n\n    - if the xml file does not exist, then default setup is applied\n    - warning, Exceptions are thrown if read XML file is not valid\n    @param segmentationParameterFile : the parameters filename\n    @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error\n     */\n    CV_WRAP virtual void setup(String segmentationParameterFile=\"\", const bool applyDefaultSetupOnFailure=true)=0;\n\n    /** @brief try to open an XML segmentation parameters file to adjust current segmentation instance setup\n\n    - if the xml file does not exist, then default setup is applied\n    - warning, Exceptions are thrown if read XML file is not valid\n    @param fs : the open FileStorage which contains segmentation parameters\n    @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error\n    */\n    virtual void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true)=0;\n\n    /** @brief adjust the current segmentation instance setup from a parameters structure\n\n    @param newParameters : a parameters structure updated with the new target configuration\n     */\n    virtual void setup(SegmentationParameters newParameters)=0;\n\n    /** @brief return the current parameters setup\n    */\n    virtual SegmentationParameters getParameters()=0;\n\n    /** @brief parameters setup display method\n    @return a string which contains formatted parameters information\n    */\n    CV_WRAP virtual const String printSetup()=0;\n\n    /** @brief write xml/yml formatted parameters information\n    
@param fs : the filename of the xml file that will be opened and written with formatted parameters information\n    */\n    CV_WRAP virtual void write( String fs ) const=0;\n\n    /** @brief write xml/yml formatted parameters information\n    @param fs : a cv::FileStorage object ready to be filled\n    */\n    virtual void write( cv::FileStorage& fs ) const=0;\n\n    /** @brief main processing method, get the result using the getSegmentationPicture() method\n    @param inputToSegment : the image to process, it must match the instance buffer size!\n    @param channelIndex : the channel to process in case of multichannel images\n    */\n    CV_WRAP virtual void run(InputArray inputToSegment, const int channelIndex=0)=0;\n\n    /** @brief access function\n    @return the last segmentation result: a boolean picture which is resampled between 0 and 255 for display purposes\n   */\n    CV_WRAP virtual void getSegmentationPicture(OutputArray transientAreas)=0;\n\n    /** @brief cleans all the buffers of the instance\n    */\n    CV_WRAP virtual void clearAllBuffers()=0;\n};\n\n/** @brief allocator\n@param inputSize : size of the input images to segment (output will be the same size)\n@relates bioinspired::TransientAreasSegmentationModule\n */\nCV_EXPORTS_W Ptr<TransientAreasSegmentationModule> createTransientAreasSegmentationModule(Size inputSize);\n\n//! @}\n\n}} // namespaces end : cv and bioinspired\n\n\n#endif\n\n\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/bioinspired.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_BIOINSPIRED_HPP__\n#define __OPENCV_BIOINSPIRED_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/bioinspired/retina.hpp\"\n#include \"opencv2/bioinspired/retinafasttonemapping.hpp\"\n#include \"opencv2/bioinspired/transientareassegmentationmodule.hpp\"\n\n/** @defgroup bioinspired Biologically inspired vision models and derived tools\n\nThe module provides biological visual system models (human visual system and others). It also\nprovides derived objects that take advantage of those bio-inspired models.\n\n@ref bioinspired_retina\n\n*/\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/calib3d/calib3d.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/calib3d.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/calib3d/calib3d_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CALIB3D_C_H__\n#define __OPENCV_CALIB3D_C_H__\n\n#include \"opencv2/core/core_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/** @addtogroup calib3d_c\n  @{\n  */\n\n/****************************************************************************************\\\n*                      Camera Calibration, Pose Estimation and Stereo                    *\n\\****************************************************************************************/\n\ntypedef struct CvPOSITObject CvPOSITObject;\n\n/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */\nCVAPI(CvPOSITObject*)  cvCreatePOSITObject( CvPoint3D32f* points, int point_count );\n\n\n/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of\n   an object given its model and projection in a weak-perspective case */\nCVAPI(void)  cvPOSIT(  CvPOSITObject* posit_object, CvPoint2D32f* image_points,\n                       double focal_length, CvTermCriteria criteria,\n                       float* rotation_matrix, float* translation_vector);\n\n/* Releases CvPOSITObject structure */\nCVAPI(void)  cvReleasePOSITObject( CvPOSITObject**  posit_object );\n\n/* updates the number of RANSAC iterations */\nCVAPI(int) cvRANSACUpdateNumIters( double p, 
double err_prob,\n                                   int model_points, int max_iters );\n\nCVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );\n\n/* Calculates fundamental matrix given a set of corresponding points */\n#define CV_FM_7POINT 1\n#define CV_FM_8POINT 2\n\n#define CV_LMEDS 4\n#define CV_RANSAC 8\n\n#define CV_FM_LMEDS_ONLY  CV_LMEDS\n#define CV_FM_RANSAC_ONLY CV_RANSAC\n#define CV_FM_LMEDS CV_LMEDS\n#define CV_FM_RANSAC CV_RANSAC\n\nenum\n{\n    CV_ITERATIVE = 0,\n    CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua \"EPnP: Efficient Perspective-n-Point Camera Pose Estimation\"\n    CV_P3P = 2, // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; \"Complete Solution Classification for the Perspective-Three-Point Problem\"\n    CV_DLS = 3 // Joel A. Hesch and Stergios I. Roumeliotis. \"A Direct Least-Squares (DLS) Method for PnP\"\n};\n\nCVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,\n                                 CvMat* fundamental_matrix,\n                                 int method CV_DEFAULT(CV_FM_RANSAC),\n                                 double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),\n                                 CvMat* status CV_DEFAULT(NULL) );\n\n/* For each input point on one of images\n   computes parameters of the corresponding\n   epipolar line on the other image */\nCVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,\n                                         int which_image,\n                                         const CvMat* fundamental_matrix,\n                                         CvMat* correspondent_lines );\n\n/* Triangulation functions */\n\nCVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,\n                                CvMat* projPoints1, CvMat* projPoints2,\n                                CvMat* points4D);\n\nCVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,\n                             CvMat* new_points1, CvMat* new_points2);\n\n\n/* Computes the optimal new camera matrix according to the free scaling parameter alpha:\n   alpha=0 - only valid pixels will be retained in the undistorted image\n   alpha=1 - all the source image pixels will be retained in the undistorted image\n*/\nCVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,\n                                         const CvMat* dist_coeffs,\n                                         CvSize image_size, double alpha,\n                                         CvMat* new_camera_matrix,\n                                         CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),\n                                         CvRect* valid_pixel_ROI CV_DEFAULT(0),\n                                         int center_principal_point CV_DEFAULT(0));\n\n/* Converts rotation vector to rotation matrix or vice versa */\nCVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,\n                         CvMat* jacobian CV_DEFAULT(0) );\n\n/* Finds perspective transformation between the object plane and image (view) plane */\nCVAPI(int) cvFindHomography( const CvMat* src_points,\n                             const CvMat* dst_points,\n                             CvMat* homography,\n                             int method CV_DEFAULT(0),\n                             double ransacReprojThreshold CV_DEFAULT(3),\n                             CvMat* mask CV_DEFAULT(0),\n                             int maxIters CV_DEFAULT(2000),\n                             double confidence 
CV_DEFAULT(0.995));\n\n/* Computes RQ decomposition for 3x3 matrices */\nCVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,\n                           CvMat *matrixQx CV_DEFAULT(NULL),\n                           CvMat *matrixQy CV_DEFAULT(NULL),\n                           CvMat *matrixQz CV_DEFAULT(NULL),\n                           CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));\n\n/* Computes projection matrix decomposition */\nCVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,\n                                         CvMat *rotMatr, CvMat *posVect,\n                                         CvMat *rotMatrX CV_DEFAULT(NULL),\n                                         CvMat *rotMatrY CV_DEFAULT(NULL),\n                                         CvMat *rotMatrZ CV_DEFAULT(NULL),\n                                         CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));\n\n/* Computes d(AB)/dA and d(AB)/dB */\nCVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );\n\n/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),\n   t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */\nCVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,\n                         const CvMat* _rvec2, const CvMat* _tvec2,\n                         CvMat* _rvec3, CvMat* _tvec3,\n                         CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),\n                         CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),\n                         CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),\n                         CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );\n\n/* Projects object points to the view plane using\n   the specified extrinsic and intrinsic camera parameters */\nCVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,\n                              const CvMat* translation_vector, const CvMat* camera_matrix,\n                              const CvMat* distortion_coeffs, CvMat* image_points,\n                              CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),\n                              CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),\n                              CvMat* dpddist CV_DEFAULT(NULL),\n                              double aspect_ratio CV_DEFAULT(0));\n\n/* Finds extrinsic camera parameters from\n   a few known corresponding point pairs and intrinsic parameters */\nCVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,\n                                          const CvMat* image_points,\n                                          const CvMat* camera_matrix,\n                                          const CvMat* distortion_coeffs,\n                                          CvMat* rotation_vector,\n                                          CvMat* translation_vector,\n                                          int use_extrinsic_guess CV_DEFAULT(0) );\n\n/* Computes initial estimate of the intrinsic camera parameters\n   in case of planar calibration target (e.g. chessboard) */\nCVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,\n                                     const CvMat* image_points,\n                                     const CvMat* npoints, CvSize image_size,\n                                     CvMat* camera_matrix,\n                                     double aspect_ratio CV_DEFAULT(1.) 
);\n\n#define CV_CALIB_CB_ADAPTIVE_THRESH  1\n#define CV_CALIB_CB_NORMALIZE_IMAGE  2\n#define CV_CALIB_CB_FILTER_QUADS     4\n#define CV_CALIB_CB_FAST_CHECK       8\n\n// Performs a fast check if a chessboard is in the input image. This is a workaround to\n// a problem of cvFindChessboardCorners being slow on images with no chessboard\n// - src: input image\n// - size: chessboard size\n// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,\n// 0 if there is no chessboard, -1 in case of error\nCVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);\n\n    /* Detects corners on a chessboard calibration pattern */\nCVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,\n                                    CvPoint2D32f* corners,\n                                    int* corner_count CV_DEFAULT(NULL),\n                                    int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );\n\n/* Draws individual chessboard corners or the whole chessboard detected */\nCVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,\n                                     CvPoint2D32f* corners,\n                                     int count, int pattern_was_found );\n\n#define CV_CALIB_USE_INTRINSIC_GUESS  1\n#define CV_CALIB_FIX_ASPECT_RATIO     2\n#define CV_CALIB_FIX_PRINCIPAL_POINT  4\n#define CV_CALIB_ZERO_TANGENT_DIST    8\n#define CV_CALIB_FIX_FOCAL_LENGTH 16\n#define CV_CALIB_FIX_K1  32\n#define CV_CALIB_FIX_K2  64\n#define CV_CALIB_FIX_K3  128\n#define CV_CALIB_FIX_K4  2048\n#define CV_CALIB_FIX_K5  4096\n#define CV_CALIB_FIX_K6  8192\n#define CV_CALIB_RATIONAL_MODEL 16384\n#define CV_CALIB_THIN_PRISM_MODEL 32768\n#define CV_CALIB_FIX_S1_S2_S3_S4  65536\n#define CV_CALIB_TILTED_MODEL  262144\n#define CV_CALIB_FIX_TAUX_TAUY  524288\n\n\n/* Finds intrinsic and extrinsic camera parameters\n   from a few views of known calibration pattern */\nCVAPI(double) cvCalibrateCamera2( const CvMat* object_points,\n                                const CvMat* image_points,\n                                const CvMat* point_counts,\n                                CvSize image_size,\n                                CvMat* camera_matrix,\n                                CvMat* distortion_coeffs,\n                                CvMat* rotation_vectors CV_DEFAULT(NULL),\n                                CvMat* translation_vectors CV_DEFAULT(NULL),\n                                int flags CV_DEFAULT(0),\n                                CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(\n                                    CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );\n\n/* Computes various useful characteristics of the camera from the data computed by\n   cvCalibrateCamera2 */\nCVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,\n                                CvSize image_size,\n                                double aperture_width CV_DEFAULT(0),\n                                double aperture_height CV_DEFAULT(0),\n                                double *fovx CV_DEFAULT(NULL),\n                                double *fovy CV_DEFAULT(NULL),\n                                double *focal_length CV_DEFAULT(NULL),\n                                CvPoint2D64f *principal_point CV_DEFAULT(NULL),\n                                double *pixel_aspect_ratio CV_DEFAULT(NULL));\n\n#define CV_CALIB_FIX_INTRINSIC  256\n#define CV_CALIB_SAME_FOCAL_LENGTH 512\n\n/* Computes the transformation from 
one camera coordinate system to another one\n   from a few correspondent views of the same calibration target. Optionally, calibrates\n   both cameras */\nCVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,\n                               const CvMat* image_points2, const CvMat* npoints,\n                               CvMat* camera_matrix1, CvMat* dist_coeffs1,\n                               CvMat* camera_matrix2, CvMat* dist_coeffs2,\n                               CvSize image_size, CvMat* R, CvMat* T,\n                               CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),\n                               int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC),\n                               CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(\n                                   CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)) );\n\n#define CV_CALIB_ZERO_DISPARITY 1024\n\n/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both\n   views parallel (=> to make all the epipolar lines horizontal or vertical) */\nCVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,\n                             const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,\n                             CvSize image_size, const CvMat* R, const CvMat* T,\n                             CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,\n                             CvMat* Q CV_DEFAULT(0),\n                             int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),\n                             double alpha CV_DEFAULT(-1),\n                             CvSize new_image_size CV_DEFAULT(cvSize(0,0)),\n                             CvRect* valid_pix_ROI1 CV_DEFAULT(0),\n                             CvRect* valid_pix_ROI2 CV_DEFAULT(0));\n\n/* Computes rectification transformations for uncalibrated pair of images using a set\n   of point correspondences */\nCVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,\n                                        const CvMat* F, CvSize img_size,\n                                        CvMat* H1, CvMat* H2,\n                                        double threshold CV_DEFAULT(5));\n\n\n\n/* stereo correspondence parameters and functions */\n\n#define CV_STEREO_BM_NORMALIZED_RESPONSE  0\n#define CV_STEREO_BM_XSOBEL               1\n\n/* Block matching algorithm structure */\ntypedef struct CvStereoBMState\n{\n    // pre-filtering (normalization of input images)\n    int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now\n    int preFilterSize; // averaging window size: ~5x5..21x21\n    int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]\n\n    // correspondence using Sum of Absolute Difference (SAD)\n    int SADWindowSize; // ~5x5..21x21\n    int minDisparity;  // minimum disparity (can be negative)\n    int numberOfDisparities; // maximum disparity - minimum disparity (> 0)\n\n    // post-filtering\n    int textureThreshold;  // the disparity is only computed for pixels\n                           // with textured enough neighborhood\n    int uniquenessRatio;   // accept the computed disparity d* only if\n                           // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)\n                           // for any d != d*+/-1 within the search range.\n    int speckleWindowSize; // disparity variation window\n    int speckleRange; // acceptable range of variation in window\n\n    int trySmallerWindows; // if 1, the results may 
be more accurate,\n                           // at the expense of slower processing\n    CvRect roi1, roi2;\n    int disp12MaxDiff;\n\n    // temporary buffers\n    CvMat* preFilteredImg0;\n    CvMat* preFilteredImg1;\n    CvMat* slidingSumBuf;\n    CvMat* cost;\n    CvMat* disp;\n} CvStereoBMState;\n\n#define CV_STEREO_BM_BASIC 0\n#define CV_STEREO_BM_FISH_EYE 1\n#define CV_STEREO_BM_NARROW 2\n\nCVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),\n                                              int numberOfDisparities CV_DEFAULT(0));\n\nCVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );\n\nCVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,\n                                          CvArr* disparity, CvStereoBMState* state );\n\nCVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,\n                                      int numberOfDisparities, int SADWindowSize );\n\nCVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,\n                                 int minDisparity, int numberOfDisparities,\n                                 int disp12MaxDiff CV_DEFAULT(1) );\n\n/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */\nCVAPI(void)  cvReprojectImageTo3D( const CvArr* disparityImage,\n                                   CvArr* _3dImage, const CvMat* Q,\n                                   int handleMissingValues CV_DEFAULT(0) );\n\n/** @} calib3d_c */\n\n#ifdef __cplusplus\n} // extern \"C\"\n\n//////////////////////////////////////////////////////////////////////////////////////////\nclass CV_EXPORTS CvLevMarq\n{\npublic:\n    CvLevMarq();\n    CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=\n              cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),\n              bool completeSymmFlag=false );\n    ~CvLevMarq();\n    void init( int nparams, int nerrs, CvTermCriteria criteria=\n              cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),\n              bool completeSymmFlag=false );\n    bool update( const CvMat*& param, CvMat*& J, CvMat*& err );\n    bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );\n\n    void clear();\n    void step();\n    enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };\n\n    cv::Ptr<CvMat> mask;\n    cv::Ptr<CvMat> prevParam;\n    cv::Ptr<CvMat> param;\n    cv::Ptr<CvMat> J;\n    cv::Ptr<CvMat> err;\n    cv::Ptr<CvMat> JtJ;\n    cv::Ptr<CvMat> JtJN;\n    cv::Ptr<CvMat> JtErr;\n    cv::Ptr<CvMat> JtJV;\n    cv::Ptr<CvMat> JtJW;\n    double prevErrNorm, errNorm;\n    int lambdaLg10;\n    CvTermCriteria criteria;\n    int state;\n    int iters;\n    bool completeSymmFlag;\n    int solveMethod;\n};\n\n#endif\n\n#endif /* __OPENCV_CALIB3D_C_H__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/calib3d.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CALIB3D_HPP__\n#define __OPENCV_CALIB3D_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/core/affine.hpp\"\n\n/**\n  @defgroup calib3d Camera Calibration and 3D Reconstruction\n\nThe functions in this section use a so-called pinhole camera model. 
In this model, a scene view is\nformed by projecting 3D points into the image plane using a perspective transformation.\n\n\\f[s  \\; m' = A [R|t] M'\\f]\n\nor\n\n\\f[s  \\vecthree{u}{v}{1} = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\n\\begin{bmatrix}\nr_{11} & r_{12} & r_{13} & t_1  \\\\\nr_{21} & r_{22} & r_{23} & t_2  \\\\\nr_{31} & r_{32} & r_{33} & t_3\n\\end{bmatrix}\n\\begin{bmatrix}\nX \\\\\nY \\\\\nZ \\\\\n1\n\\end{bmatrix}\\f]\n\nwhere:\n\n-   \\f$(X, Y, Z)\\f$ are the coordinates of a 3D point in the world coordinate space\n-   \\f$(u, v)\\f$ are the coordinates of the projection point in pixels\n-   \\f$A\\f$ is a camera matrix, or a matrix of intrinsic parameters\n-   \\f$(c_x, c_y)\\f$ is a principal point that is usually at the image center\n-   \\f$f_x, f_y\\f$ are the focal lengths expressed in pixel units.\n\nThus, if an image from the camera is scaled by a factor, all of these parameters should be scaled\n(multiplied/divided, respectively) by the same factor. The matrix of intrinsic parameters does not\ndepend on the scene viewed. So, once estimated, it can be re-used as long as the focal length is\nfixed (in the case of a zoom lens). The joint rotation-translation matrix \\f$[R|t]\\f$ is called a matrix of\nextrinsic parameters. It is used to describe the camera motion around a static scene, or vice versa,\nrigid motion of an object in front of a still camera. That is, \\f$[R|t]\\f$ translates coordinates of a\npoint \\f$(X, Y, Z)\\f$ to a coordinate system, fixed with respect to the camera. The transformation above\nis equivalent to the following (when \\f$z \\ne 0\\f$ ):\n\n\\f[\\begin{array}{l}\n\\vecthree{x}{y}{z} = R  \\vecthree{X}{Y}{Z} + t \\\\\nx' = x/z \\\\\ny' = y/z \\\\\nu = f_x*x' + c_x \\\\\nv = f_y*y' + c_y\n\\end{array}\\f]\n\nReal lenses usually have some distortion, mostly radial distortion and slight tangential distortion.\nSo, the above model is extended as:\n\n\\f[\\begin{array}{l}\n\\vecthree{x}{y}{z} = R  \\vecthree{X}{Y}{Z} + t \\\\\nx' = x/z \\\\\ny' = y/z \\\\\nx'' = x'  \\frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\\\\ny'' = y'  \\frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\\\\n\\text{where} \\quad r^2 = x'^2 + y'^2  \\\\\nu = f_x*x'' + c_x \\\\\nv = f_y*y'' + c_y\n\\end{array}\\f]\n\n\\f$k_1\\f$, \\f$k_2\\f$, \\f$k_3\\f$, \\f$k_4\\f$, \\f$k_5\\f$, and \\f$k_6\\f$ are radial distortion coefficients. \\f$p_1\\f$ and \\f$p_2\\f$ are\ntangential distortion coefficients. \\f$s_1\\f$, \\f$s_2\\f$, \\f$s_3\\f$, and \\f$s_4\\f$ are the thin prism distortion\ncoefficients. Higher-order coefficients are not considered in OpenCV.\n\nIn some cases the image sensor may be tilted in order to focus an oblique plane in front of the\ncamera (Scheimpflug condition). This can be useful for particle image velocimetry (PIV) or\ntriangulation with a laser fan. The tilt causes a perspective distortion of \\f$x''\\f$ and\n\\f$y''\\f$. This distortion can be modelled in the following way, see e.g. 
@cite Louhichi07.\n\n\\f[\\begin{array}{l}\ns\\vecthree{x'''}{y'''}{1} =\n\\vecthreethree{R_{33}(\\tau_x, \\tau_y)}{0}{-R_{13}(\\tau_x, \\tau_y)}\n{0}{R_{33}(\\tau_x, \\tau_y)}{-R_{23}(\\tau_x, \\tau_y)}\n{0}{0}{1} R(\\tau_x, \\tau_y) \\vecthree{x''}{y''}{1}\\\\\nu = f_x*x''' + c_x \\\\\nv = f_y*y''' + c_y\n\\end{array}\\f]\n\nwhere the matrix \\f$R(\\tau_x, \\tau_y)\\f$ is defined by two rotations with angular parameter \\f$\\tau_x\\f$\nand \\f$\\tau_y\\f$, respectively,\n\n\\f[\nR(\\tau_x, \\tau_y) =\n\\vecthreethree{\\cos(\\tau_y)}{0}{-\\sin(\\tau_y)}{0}{1}{0}{\\sin(\\tau_y)}{0}{\\cos(\\tau_y)}\n\\vecthreethree{1}{0}{0}{0}{\\cos(\\tau_x)}{\\sin(\\tau_x)}{0}{-\\sin(\\tau_x)}{\\cos(\\tau_x)} =\n\\vecthreethree{\\cos(\\tau_y)}{\\sin(\\tau_y)\\sin(\\tau_x)}{-\\sin(\\tau_y)\\cos(\\tau_x)}\n{0}{\\cos(\\tau_x)}{\\sin(\\tau_x)}\n{\\sin(\\tau_y)}{-\\cos(\\tau_y)\\sin(\\tau_x)}{\\cos(\\tau_y)\\cos(\\tau_x)}.\n\\f]\n\nIn the functions below the coefficients are passed or returned as the\n\n\\f[(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f]\n\nvector. That is, if the vector contains four elements, it means that \\f$k_3=0\\f$ . The distortion\ncoefficients do not depend on the scene viewed. Thus, they also belong to the intrinsic camera\nparameters, and they remain the same regardless of the captured image resolution. If, for example, a\ncamera has been calibrated on images of 320 x 240 resolution, exactly the same distortion\ncoefficients can be used for 640 x 480 images from the same camera while \\f$f_x\\f$, \\f$f_y\\f$, \\f$c_x\\f$, and\n\\f$c_y\\f$ need to be scaled appropriately.\n\nThe functions below use the above model to do the following:\n\n-   Project 3D points to the image plane given intrinsic and extrinsic parameters.\n-   Compute extrinsic parameters given intrinsic parameters, a few 3D points, and their\nprojections.\n-   Estimate intrinsic and extrinsic camera parameters from several views of a known calibration\npattern (every view is described by several 3D-2D point correspondences).\n-   Estimate the relative position and orientation of the stereo camera \"heads\" and compute the\n*rectification* transformation that makes the camera optical axes parallel.\n\n@note\n    -   A calibration sample for 3 cameras in a horizontal position can be found at\n        opencv_source_code/samples/cpp/3calibration.cpp\n    -   A calibration sample based on a sequence of images can be found at\n        opencv_source_code/samples/cpp/calibration.cpp\n    -   A calibration sample for 3D reconstruction can be found at\n        opencv_source_code/samples/cpp/build3dmodel.cpp\n    -   A calibration sample of an artificially generated camera and chessboard patterns can be\n        found at opencv_source_code/samples/cpp/calibration_artificial.cpp\n    -   A calibration example on stereo calibration can be found at\n        opencv_source_code/samples/cpp/stereo_calib.cpp\n    -   A calibration example on stereo matching can be found at\n        opencv_source_code/samples/cpp/stereo_match.cpp\n    -   (Python) A camera calibration sample can be found at\n        opencv_source_code/samples/python/calibrate.py\n\n  @{\n    @defgroup calib3d_fisheye Fisheye camera model\n\n    Definitions: Let P be a point in 3D of coordinates X in the world reference frame (stored in the\n    matrix X). The coordinate vector of P in the camera reference frame is:\n\n    \\f[Xc = R X + T\\f]\n\n    where R is the rotation matrix corresponding to the rotation vector om: R 
= rodrigues(om); call x, y\n    and z the 3 coordinates of Xc:\n\n    \\f[x = Xc_1 \\\\ y = Xc_2 \\\\ z = Xc_3\\f]\n\n    The pinhole projection coordinates of P are [a; b] where\n\n    \\f[a = x / z \\ and \\ b = y / z \\\\ r^2 = a^2 + b^2 \\\\ \\theta = atan(r)\\f]\n\n    Fisheye distortion:\n\n    \\f[\\theta_d = \\theta (1 + k_1 \\theta^2 + k_2 \\theta^4 + k_3 \\theta^6 + k_4 \\theta^8)\\f]\n\n    The distorted point coordinates are [x'; y'] where\n\n    \\f[x' = (\\theta_d / r) x \\\\ y' = (\\theta_d / r) y \\f]\n\n    Finally, conversion into pixel coordinates: the final pixel coordinate vector [u; v] is given by:\n\n    \\f[u = f_x (x' + \\alpha y') + c_x \\\\\n    v = f_y y' + c_y\\f]\n\n    @defgroup calib3d_c C API\n\n  @}\n */\n\nnamespace cv\n{\n\n//! @addtogroup calib3d\n//! @{\n\n//! type of the robust estimation algorithm\nenum { LMEDS  = 4, //!< least-median algorithm\n       RANSAC = 8, //!< RANSAC algorithm\n       RHO    = 16 //!< RHO algorithm\n     };\n\nenum { SOLVEPNP_ITERATIVE = 0,\n       SOLVEPNP_EPNP      = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp\n       SOLVEPNP_P3P       = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete\n       SOLVEPNP_DLS       = 3, //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct\n       SOLVEPNP_UPNP      = 4  //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive\n     };\n\nenum { CALIB_CB_ADAPTIVE_THRESH = 1,\n       CALIB_CB_NORMALIZE_IMAGE = 2,\n       CALIB_CB_FILTER_QUADS    = 4,\n       CALIB_CB_FAST_CHECK      = 8\n     };\n\nenum { CALIB_CB_SYMMETRIC_GRID  = 1,\n       CALIB_CB_ASYMMETRIC_GRID = 2,\n       CALIB_CB_CLUSTERING      = 4\n     };\n\nenum { CALIB_USE_INTRINSIC_GUESS = 0x00001,\n       CALIB_FIX_ASPECT_RATIO    = 0x00002,\n       CALIB_FIX_PRINCIPAL_POINT = 0x00004,\n       CALIB_ZERO_TANGENT_DIST   = 0x00008,\n       CALIB_FIX_FOCAL_LENGTH    = 0x00010,\n       CALIB_FIX_K1              = 0x00020,\n       CALIB_FIX_K2              = 0x00040,\n       CALIB_FIX_K3              = 0x00080,\n       CALIB_FIX_K4              = 0x00800,\n       CALIB_FIX_K5              = 0x01000,\n       CALIB_FIX_K6              = 0x02000,\n       CALIB_RATIONAL_MODEL      = 0x04000,\n       CALIB_THIN_PRISM_MODEL    = 0x08000,\n       CALIB_FIX_S1_S2_S3_S4     = 0x10000,\n       CALIB_TILTED_MODEL        = 0x40000,\n       CALIB_FIX_TAUX_TAUY       = 0x80000,\n       // only for stereo\n       CALIB_FIX_INTRINSIC       = 0x00100,\n       CALIB_SAME_FOCAL_LENGTH   = 0x00200,\n       // for stereo rectification\n       CALIB_ZERO_DISPARITY      = 0x00400,\n       CALIB_USE_LU              = (1 << 17), //!< use LU instead of SVD decomposition for solving; much faster but potentially less precise\n     };\n\n//! 
the algorithm for finding a fundamental matrix\nenum { FM_7POINT = 1, //!< 7-point algorithm\n       FM_8POINT = 2, //!< 8-point algorithm\n       FM_LMEDS  = 4, //!< least-median algorithm\n       FM_RANSAC = 8  //!< RANSAC algorithm\n     };\n\n\n\n/** @brief Converts a rotation matrix to a rotation vector or vice versa.\n\n@param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).\n@param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.\n@param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial\nderivatives of the output array components with respect to the input array components.\n\n\\f[\\begin{array}{l} \\theta \\leftarrow norm(r) \\\\ r  \\leftarrow r/ \\theta \\\\ R =  \\cos{\\theta} I + (1- \\cos{\\theta} ) r r^T +  \\sin{\\theta} \\vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \\end{array}\\f]\n\nThe inverse transformation can also be done easily, since\n\n\\f[\\sin ( \\theta ) \\vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \\frac{R - R^T}{2}\\f]\n\nA rotation vector is a convenient and compact representation of a rotation matrix (since any\nrotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry\noptimization procedures like calibrateCamera, stereoCalibrate, or solvePnP .\n */\nCV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );\n\n/** @brief Finds a perspective transformation between two planes.\n\n@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2\nor vector\\<Point2f\\> .\n@param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or\na vector\\<Point2f\\> .\n@param method Method used to compute a homography matrix. The following methods are possible:\n-   **0** - a regular method using all the points\n-   **RANSAC** - RANSAC-based robust method\n-   **LMEDS** - Least-Median robust method\n-   **RHO**    - PROSAC-based robust method\n@param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier\n(used in the RANSAC and RHO methods only). That is, if\n\\f[\\| \\texttt{dstPoints} _i -  \\texttt{convertPointsHomogeneous} ( \\texttt{H} * \\texttt{srcPoints} _i) \\|  >  \\texttt{ransacReprojThreshold}\\f]\nthen the point \\f$i\\f$ is considered an outlier. If srcPoints and dstPoints are measured in pixels,\nit usually makes sense to set this parameter somewhere in the range of 1 to 10.\n@param mask Optional output mask set by a robust method ( RANSAC or LMEDS ). Note that the input\nmask values are ignored.\n@param maxIters The maximum number of RANSAC iterations (2000 at most).\n@param confidence Confidence level, between 0 and 1.\n\nThe functions find and return the perspective transformation \\f$H\\f$ between the source and the\ndestination planes:\n\n\\f[s_i  \\vecthree{x'_i}{y'_i}{1} \\sim H  \\vecthree{x_i}{y_i}{1}\\f]\n\nso that the back-projection error\n\n\\f[\\sum _i \\left ( x'_i- \\frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \\right )^2+ \\left ( y'_i- \\frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \\right )^2\\f]\n\nis minimized. 
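\n\nA minimal usage sketch (the point pairs below are made-up values; in practice they usually come\nfrom a feature matcher):\n@code\n    vector<Point2f> srcPts, dstPts; // at least four corresponding pairs are required\n    srcPts.push_back(Point2f(0,0));     dstPts.push_back(Point2f(10,5));\n    srcPts.push_back(Point2f(100,0));   dstPts.push_back(Point2f(110,10));\n    srcPts.push_back(Point2f(100,100)); dstPts.push_back(Point2f(105,115));\n    srcPts.push_back(Point2f(0,100));   dstPts.push_back(Point2f(5,110));\n    srcPts.push_back(Point2f(50,50));   dstPts.push_back(Point2f(57,60));\n\n    Mat inlierMask;\n    Mat H = findHomography(srcPts, dstPts, RANSAC, 3.0, inlierMask);\n    // H is empty if no homography could be estimated; otherwise it is scaled so that h_33 = 1\n@endcode\n\n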
If the parameter method is set to the default value 0, the function uses all the point\npairs to compute an initial homography estimate with a simple least-squares scheme.\n\nHowever, if not all of the point pairs ( \\f$srcPoints_i\\f$, \\f$dstPoints_i\\f$ ) fit the rigid perspective\ntransformation (that is, there are some outliers), this initial estimate will be poor. In this case,\nyou can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different\nrandom subsets of the corresponding point pairs (of four pairs each), estimate the homography matrix\nusing this subset and a simple least-squares algorithm, and then compute the quality/goodness of the\ncomputed homography (which is the number of inliers for RANSAC or the median re-projection error for\nLMeDS). The best subset is then used to produce the initial estimate of the homography matrix and\nthe mask of inliers/outliers.\n\nRegardless of the method, robust or not, the computed homography matrix is refined further (using\ninliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the\nre-projection error even more.\n\nThe methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to\ndistinguish inliers from outliers. The method LMeDS does not need any threshold but it works\ncorrectly only when more than 50% of the points are inliers. Finally, if there are no outliers and the\nnoise is rather small, use the default method (method=0).\n\nThe function is used to find initial intrinsic and extrinsic matrices. The homography matrix is\ndetermined up to a scale. Thus, it is normalized so that \\f$h_{33}=1\\f$. Note that whenever an H matrix\ncannot be estimated, an empty one will be returned.\n\n@sa\n    getAffineTransform, getPerspectiveTransform, estimateRigidTransform, warpPerspective,\n    perspectiveTransform\n\n@note\n   -   An example on calculating a homography for image matching can be found at\n        opencv_source_code/samples/cpp/video_homography.cpp\n\n */\nCV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,\n                                 int method = 0, double ransacReprojThreshold = 3,\n                                 OutputArray mask = noArray(), const int maxIters = 2000,\n                                 const double confidence = 0.995);\n\n/** @overload */\nCV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,\n                               OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );\n\n/** @brief Computes an RQ decomposition of 3x3 matrices.\n\n@param src 3x3 input matrix.\n@param mtxR Output 3x3 upper-triangular matrix.\n@param mtxQ Output 3x3 orthogonal matrix.\n@param Qx Optional output 3x3 rotation matrix around x-axis.\n@param Qy Optional output 3x3 rotation matrix around y-axis.\n@param Qz Optional output 3x3 rotation matrix around z-axis.\n\nThe function computes an RQ decomposition using the given rotations. This function is used in\ndecomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera\nand a rotation matrix.\n\nIt optionally returns three rotation matrices, one for each axis, and the three Euler angles in\ndegrees (as the return value) that could be used in OpenGL. Note, there is always more than one\nsequence of rotations about the three principal axes that results in the same orientation of an\nobject, e.g. see @cite Slabaugh . 
The returned three rotation matrices and the corresponding three Euler angles\nare only one of the possible solutions.\n */\nCV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,\n                                OutputArray Qx = noArray(),\n                                OutputArray Qy = noArray(),\n                                OutputArray Qz = noArray());\n\n/** @brief Decomposes a projection matrix into a rotation matrix and a camera matrix.\n\n@param projMatrix 3x4 input projection matrix P.\n@param cameraMatrix Output 3x3 camera matrix K.\n@param rotMatrix Output 3x3 external rotation matrix R.\n@param transVect Output 4x1 translation vector T.\n@param rotMatrixX Optional 3x3 rotation matrix around x-axis.\n@param rotMatrixY Optional 3x3 rotation matrix around y-axis.\n@param rotMatrixZ Optional 3x3 rotation matrix around z-axis.\n@param eulerAngles Optional three-element vector containing three Euler angles of rotation in\ndegrees.\n\nThe function computes a decomposition of a projection matrix into a calibration and a rotation\nmatrix and the position of a camera.\n\nIt optionally returns three rotation matrices, one for each axis, and three Euler angles that could\nbe used in OpenGL. Note, there is always more than one sequence of rotations about the three\nprincipal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . The returned\nthree rotation matrices and the corresponding three Euler angles are only one of the possible solutions.\n\nThe function is based on RQDecomp3x3 .\n */\nCV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,\n                                             OutputArray rotMatrix, OutputArray transVect,\n                                             OutputArray rotMatrixX = noArray(),\n                                             OutputArray rotMatrixY = noArray(),\n                                             OutputArray rotMatrixZ = noArray(),\n                                             OutputArray eulerAngles = noArray() );\n\n/** @brief Computes partial derivatives of the matrix product for each multiplied matrix.\n\n@param A First multiplied matrix.\n@param B Second multiplied matrix.\n@param dABdA First output derivative matrix d(A\\*B)/dA of size\n\\f$\\texttt{A.rows*B.cols} \\times {A.rows*A.cols}\\f$ .\n@param dABdB Second output derivative matrix d(A\\*B)/dB of size\n\\f$\\texttt{A.rows*B.cols} \\times {B.rows*B.cols}\\f$ .\n\nThe function computes partial derivatives of the elements of the matrix product \\f$A*B\\f$ with regard to\nthe elements of each of the two input matrices. 
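\n\nA small illustrative sketch (the matrix values are arbitrary):\n@code\n    Mat A = (Mat_<double>(2,3) << 1, 2, 3, 4, 5, 6);\n    Mat B = (Mat_<double>(3,2) << 7, 8, 9, 10, 11, 12);\n    Mat dABdA, dABdB;\n    matMulDeriv(A, B, dABdA, dABdB);\n    // A*B is 2x2, so dABdA is (2*2)x(2*3) = 4x6 and dABdB is (2*2)x(3*2) = 4x6\n@endcode\n\n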
The function is used to compute the Jacobian\nmatrices in stereoCalibrate but can also be used in any other similar optimization function.\n */\nCV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );\n\n/** @brief Combines two rotation-and-shift transformations.\n\n@param rvec1 First rotation vector.\n@param tvec1 First translation vector.\n@param rvec2 Second rotation vector.\n@param tvec2 Second translation vector.\n@param rvec3 Output rotation vector of the superposition.\n@param tvec3 Output translation vector of the superposition.\n@param dr3dr1\n@param dr3dt1\n@param dr3dr2\n@param dr3dt2\n@param dt3dr1\n@param dt3dt1\n@param dt3dr2\n@param dt3dt2 Optional output derivatives of rvec3 or tvec3 with regard to rvec1, rvec2, tvec1 and\ntvec2, respectively.\n\nThe functions compute:\n\n\\f[\\begin{array}{l} \\texttt{rvec3} =  \\mathrm{rodrigues} ^{-1} \\left ( \\mathrm{rodrigues} ( \\texttt{rvec2} )  \\cdot \\mathrm{rodrigues} ( \\texttt{rvec1} ) \\right )  \\\\ \\texttt{tvec3} =  \\mathrm{rodrigues} ( \\texttt{rvec2} )  \\cdot \\texttt{tvec1} +  \\texttt{tvec2} \\end{array} ,\\f]\n\nwhere \\f$\\mathrm{rodrigues}\\f$ denotes a rotation vector to a rotation matrix transformation, and\n\\f$\\mathrm{rodrigues}^{-1}\\f$ denotes the inverse transformation. See Rodrigues for details.\n\nAlso, the functions can compute the derivatives of the output vectors with regard to the input\nvectors (see matMulDeriv ). The functions are used inside stereoCalibrate but can also be used in\nyour own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a\nfunction that contains a matrix multiplication.\n */\nCV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,\n                             InputArray rvec2, InputArray tvec2,\n                             OutputArray rvec3, OutputArray tvec3,\n                             OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(),\n                             OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(),\n                             OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),\n                             OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );\n\n/** @brief Projects 3D points to an image plane.\n\n@param objectPoints Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or\nvector\\<Point3f\\> ), where N is the number of points in the view.\n@param rvec Rotation vector. See Rodrigues for details.\n@param tvec Translation vector.\n@param cameraMatrix Camera matrix \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ .\n@param distCoeffs Input vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$ of\n4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.\n@param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or\nvector\\<Point2f\\> .\n@param jacobian Optional output 2Nx(10+\\<numDistCoeffs\\>) Jacobian matrix of derivatives of image\npoints with respect to components of the rotation vector, translation vector, focal lengths,\ncoordinates of the principal point and the distortion coefficients. In the old interface different\ncomponents of the Jacobian are returned via different output parameters.\n@param aspectRatio Optional \"fixed aspect ratio\" parameter. 
If the parameter is not 0, the\nfunction assumes that the aspect ratio (*fx/fy*) is fixed and correspondingly adjusts the Jacobian\nmatrix.\n\nThe function computes projections of 3D points to the image plane given intrinsic and extrinsic\ncamera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of\nimage point coordinates (as functions of all the input parameters) with respect to the particular\nparameters, intrinsic and/or extrinsic. The Jacobians are used during the global optimization in\ncalibrateCamera, solvePnP, and stereoCalibrate . The function itself can also be used to compute a\nre-projection error given the current intrinsic and extrinsic parameters.\n\n@note By setting rvec=tvec=(0,0,0) or by setting cameraMatrix to a 3x3 identity matrix, or by\npassing zero distortion coefficients, you can get various useful special cases of the function. This\nmeans that you can compute the distorted coordinates for a sparse set of points or apply a\nperspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.\n */\nCV_EXPORTS_W void projectPoints( InputArray objectPoints,\n                                 InputArray rvec, InputArray tvec,\n                                 InputArray cameraMatrix, InputArray distCoeffs,\n                                 OutputArray imagePoints,\n                                 OutputArray jacobian = noArray(),\n                                 double aspectRatio = 0 );\n\n/** @brief Finds an object pose from 3D-2D point correspondences.\n\n@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or\n1xN/Nx1 3-channel, where N is the number of points. vector\\<Point3f\\> can also be passed here.\n@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,\nwhere N is the number of points. vector\\<Point2f\\> can also be passed here.\n@param cameraMatrix Input camera matrix \\f$A = \\vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\\f$ .\n@param distCoeffs Input vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$ of\n4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are\nassumed.\n@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from\nthe model coordinate system to the camera coordinate system.\n@param tvec Output translation vector.\n@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses\nthe provided rvec and tvec values as initial approximations of the rotation and translation\nvectors, respectively, and further optimizes them.\n@param flags Method for solving a PnP problem:\n-   **SOLVEPNP_ITERATIVE** Iterative method is based on Levenberg-Marquardt optimization. In\nthis case the function finds such a pose that minimizes reprojection error, that is the sum\nof squared distances between the observed projections imagePoints and the projected (using\nprojectPoints ) objectPoints .\n-   **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Cheng\n\"Complete Solution Classification for the Perspective-Three-Point Problem\". 
In this case the\nfunction requires exactly four object and image points.\n-   **SOLVEPNP_EPNP** Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the\npaper \"EPnP: Efficient Perspective-n-Point Camera Pose Estimation\".\n-   **SOLVEPNP_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.\n\"A Direct Least-Squares (DLS) Method for PnP\".\n-   **SOLVEPNP_UPNP** Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,\nF. Moreno-Noguer. \"Exhaustive Linearization for Robust Camera Pose and Focal Length\nEstimation\". In this case the function also estimates the parameters \\f$f_x\\f$ and \\f$f_y\\f$\nassuming that both have the same value. Then the cameraMatrix is updated with the estimated\nfocal length.\n\nThe function estimates the object pose given a set of object points, their corresponding image\nprojections, as well as the camera matrix and the distortion coefficients.\n\n@note\n   -   An example of how to use solvePnP for planar augmented reality can be found at\n        opencv_source_code/samples/python/plane_ar.py\n   -   If you are using Python:\n        - NumPy array slices won't work as input because solvePnP requires contiguous\n        arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of\n        modules/calib3d/src/solvepnp.cpp version 2.4.9)\n        - The P3P algorithm requires image points to be in an array of shape (N,1,2) due\n        to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)\n        which requires 2-channel information.\n        - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of\n        it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =\n        np.ascontiguousarray(D[:,:2]).reshape((N,1,2))\n */\nCV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,\n                            InputArray cameraMatrix, InputArray distCoeffs,\n                            OutputArray rvec, OutputArray tvec,\n                            bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE );\n\n/** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.\n\n@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or\n1xN/Nx1 3-channel, where N is the number of points. vector\\<Point3f\\> can also be passed here.\n@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,\nwhere N is the number of points. vector\\<Point2f\\> can also be passed here.\n@param cameraMatrix Input camera matrix \\f$A = \\vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\\f$ .\n@param distCoeffs Input vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$ of\n4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are\nassumed.\n@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from\nthe model coordinate system to the camera coordinate system.\n@param tvec Output translation vector.\n@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. 
If true (1), the function uses\nthe provided rvec and tvec values as initial approximations of the rotation and translation\nvectors, respectively, and further optimizes them.\n@param iterationsCount Number of iterations.\n@param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value\nis the maximum allowed distance between the observed and computed point projections to consider it\nan inlier.\n@param confidence The probability that the algorithm produces a useful result.\n@param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .\n@param flags Method for solving a PnP problem (see solvePnP ).\n\nThe function estimates an object pose given a set of object points, their corresponding image\nprojections, as well as the camera matrix and the distortion coefficients. This function finds such\na pose that minimizes reprojection error, that is, the sum of squared distances between the observed\nprojections imagePoints and the projected (using projectPoints ) objectPoints. The use of RANSAC\nmakes the function resistant to outliers.\n\n@note\n   -   An example of how to use solvePnPRansac for object detection can be found at\n        opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/\n */\nCV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,\n                                  InputArray cameraMatrix, InputArray distCoeffs,\n                                  OutputArray rvec, OutputArray tvec,\n                                  bool useExtrinsicGuess = false, int iterationsCount = 100,\n                                  float reprojectionError = 8.0, double confidence = 0.99,\n                                  OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );\n\n/** @brief Finds an initial camera matrix from 3D-2D point correspondences.\n\n@param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern\ncoordinate space. In the old interface all the per-view vectors are concatenated. See\ncalibrateCamera for details.\n@param imagePoints Vector of vectors of the projections of the calibration pattern points. In the\nold interface all the per-view vectors are concatenated.\n@param imageSize Image size in pixels used to initialize the principal point.\n@param aspectRatio If it is zero or negative, both \\f$f_x\\f$ and \\f$f_y\\f$ are estimated independently.\nOtherwise, \\f$f_x = f_y * \\texttt{aspectRatio}\\f$ .\n\nThe function estimates and returns an initial camera matrix for the camera calibration process.\nCurrently, the function only supports planar calibration patterns, which are patterns where each\nobject point has z-coordinate = 0.\n */\nCV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,\n                                     InputArrayOfArrays imagePoints,\n                                     Size imageSize, double aspectRatio = 1.0 );\n\n/** @brief Finds the positions of internal corners of the chessboard.\n\n@param image Source chessboard view. 
It must be an 8-bit grayscale or color image.\n@param patternSize Number of inner corners per chessboard row and column\n( patternSize = cvSize(points_per_row,points_per_column) = cvSize(columns,rows) ).\n@param corners Output array of detected corners.\n@param flags Various operation flags that can be zero or a combination of the following values:\n-   **CV_CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black\nand white, rather than a fixed threshold level (computed from the average image brightness).\n-   **CV_CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with equalizeHist before\napplying fixed or adaptive thresholding.\n-   **CV_CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter,\nsquare-like shape) to filter out false quads extracted at the contour retrieval stage.\n-   **CALIB_CB_FAST_CHECK** Run a fast check on the image that looks for chessboard corners,\nand shortcut the call if none is found. This can drastically speed up the call in the\ndegenerate condition when no chessboard is observed.\n\nThe function attempts to determine whether the input image is a view of the chessboard pattern and\nlocate the internal chessboard corners. The function returns a non-zero value if all of the corners\nare found and they are placed in a certain order (row by row, left to right in every row).\nOtherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,\na regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black\nsquares touch each other. The detected coordinates are approximate, and to determine their positions\nmore accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with\ndifferent parameters if returned coordinates are not accurate enough.\n\nSample usage of detecting and drawing chessboard corners:\n@code\n    Size patternsize(8,6); //interior number of corners\n    Mat gray = ....; //source image\n    vector<Point2f> corners; //this will be filled by the detected corners\n\n    //CALIB_CB_FAST_CHECK saves a lot of time on images\n    //that do not contain any chessboard corners\n    bool patternfound = findChessboardCorners(gray, patternsize, corners,\n            CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE\n            + CALIB_CB_FAST_CHECK);\n\n    if(patternfound)\n      cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),\n        TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));\n\n    drawChessboardCorners(img, patternsize, Mat(corners), patternfound);\n@endcode\n@note The function requires white space (like a square-thick border, the wider the better) around\nthe board to make the detection more robust in various environments. Otherwise, if there is no\nborder and the background is dark, the outer black squares cannot be segmented properly and so the\nsquare grouping and ordering algorithm fails.\n */\nCV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,\n                                         int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );\n\n//! finds subpixel-accurate positions of the chessboard corners\nCV_EXPORTS bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );\n\n/** @brief Renders the detected chessboard corners.\n\n@param image Destination image. 
It must be an 8-bit color image.\n@param patternSize Number of inner corners per chessboard row and column\n(patternSize = cv::Size(points_per_row,points_per_column)).\n@param corners Array of detected corners, the output of findChessboardCorners.\n@param patternWasFound Parameter indicating whether the complete board was found or not. The\nreturn value of findChessboardCorners should be passed here.\n\nThe function draws individual chessboard corners detected either as red circles if the board was not\nfound, or as colored corners connected with lines if the board was found.\n */\nCV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,\n                                         InputArray corners, bool patternWasFound );\n\n/** @brief Finds centers in the grid of circles.\n\n@param image grid view of input circles; it must be an 8-bit grayscale or color image.\n@param patternSize number of circles per row and column\n( patternSize = Size(points_per_row, points_per_column) ).\n@param centers output array of detected centers.\n@param flags various operation flags that can be one of the following values:\n-   **CALIB_CB_SYMMETRIC_GRID** uses symmetric pattern of circles.\n-   **CALIB_CB_ASYMMETRIC_GRID** uses asymmetric pattern of circles.\n-   **CALIB_CB_CLUSTERING** uses a special algorithm for grid detection. It is more robust to\nperspective distortions but much more sensitive to background clutter.\n@param blobDetector feature detector that finds blobs like dark circles on light background.\n\nThe function attempts to determine whether the input image contains a grid of circles. If it does, the\nfunction locates centers of the circles. The function returns a non-zero value if all of the centers\nhave been found and they have been placed in a certain order (row by row, left to right in every\nrow). Otherwise, if the function fails to find all the corners or reorder them, it returns 0.\n\nSample usage of detecting and drawing the centers of circles:\n@code\n    Size patternsize(7,7); //number of centers\n    Mat gray = ....; //source image\n    vector<Point2f> centers; //this will be filled by the detected centers\n\n    bool patternfound = findCirclesGrid(gray, patternsize, centers);\n\n    drawChessboardCorners(img, patternsize, Mat(centers), patternfound);\n@endcode\n@note The function requires white space (like a square-thick border, the wider the better) around\nthe board to make the detection more robust in various environments.\n */\nCV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,\n                                   OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,\n                                   const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());\n\n/** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.\n\n@param objectPoints In the new interface it is a vector of vectors of calibration pattern points in\nthe calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer\nvector contains as many elements as the number of the pattern views. If the same calibration pattern\nis shown in each view and it is fully visible, all the vectors will be the same. However, it is\npossible to use partially occluded patterns, or even different patterns in different views. Then,\nthe vectors will be different. 
The points are 3D, but since they are in a pattern coordinate system,\nthen, if the rig is planar, it may make sense to put the model on the XY coordinate plane so that the\nZ-coordinate of each input object point is 0.\nIn the old interface all the vectors of object points from different views are concatenated\ntogether.\n@param imagePoints In the new interface it is a vector of vectors of the projections of calibration\npattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() must be equal to\nobjectPoints.size(), and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.\nIn the old interface all the vectors of object points from different views are concatenated\ntogether.\n@param imageSize Size of the image used only to initialize the intrinsic camera matrix.\n@param cameraMatrix Output 3x3 floating-point camera matrix\n\\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ . If CV_CALIB_USE_INTRINSIC_GUESS\nand/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be\ninitialized before calling the function.\n@param distCoeffs Output vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$ of\n4, 5, 8, 12 or 14 elements.\n@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view\n(e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding\nk-th translation vector (see the next output parameter description) brings the calibration pattern\nfrom the model coordinate space (in which object points are specified) to the world coordinate\nspace, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).\n@param tvecs Output vector of translation vectors estimated for each pattern view.\n@param flags Different flags that may be zero or a combination of the following values:\n-   **CV_CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of\nfx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image\ncenter ( imageSize is used), and focal distances are computed in a least-squares fashion.\nNote that if intrinsic parameters are known, there is no need to use this function just to\nestimate extrinsic parameters. Use solvePnP instead.\n-   **CV_CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global\noptimization. It stays at the center or at a different location specified when\nCV_CALIB_USE_INTRINSIC_GUESS is set too.\n-   **CV_CALIB_FIX_ASPECT_RATIO** The function considers only fy as a free parameter. The\nratio fx/fy stays the same as in the input cameraMatrix . When\nCV_CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are\nignored, only their ratio is computed and used further.\n-   **CV_CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients \\f$(p_1, p_2)\\f$ are set\nto zeros and stay zero.\n-   **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** The corresponding radial distortion\ncoefficient is not changed during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is\nset, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.\n-   **CV_CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the\nbackward compatibility, this extra flag should be explicitly specified to make the\ncalibration function use the rational model and return 8 coefficients. 
If the flag is not\nset, the function computes and returns only 5 distortion coefficients.\n-   **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the\nbackward compatibility, this extra flag should be explicitly specified to make the\ncalibration function use the thin prism model and return 12 coefficients. If the flag is not\nset, the function computes and returns only 5 distortion coefficients.\n-   **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during\nthe optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the\nsupplied distCoeffs matrix is used. Otherwise, it is set to 0.\n-   **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the\nbackward compatibility, this extra flag should be explicitly specified to make the\ncalibration function use the tilted sensor model and return 14 coefficients. If the flag is not\nset, the function computes and returns only 5 distortion coefficients.\n-   **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during\nthe optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the\nsupplied distCoeffs matrix is used. Otherwise, it is set to 0.\n@param criteria Termination criteria for the iterative optimization algorithm.\n\nThe function estimates the intrinsic camera parameters and extrinsic parameters for each of the\nviews. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object\npoints and their corresponding 2D projections in each view must be specified. That may be achieved\nby using an object with a known geometry and easily detectable feature points. Such an object is\ncalled a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as\na calibration rig (see findChessboardCorners ). Currently, initialization of intrinsic parameters\n(when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration\npatterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also\nbe used as long as initial cameraMatrix is provided.\n\nThe algorithm performs the following steps:\n\n-   Compute the initial intrinsic parameters (an option available only for planar calibration\n    patterns) or read them from the input parameters. The distortion coefficients are all set to\n    zeros initially unless some of CV_CALIB_FIX_K? are specified.\n\n-   Estimate the initial camera pose as if the intrinsic parameters were already known. This is\n    done using solvePnP .\n\n-   Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,\n    that is, the total sum of squared distances between the observed feature points imagePoints and\n    the projected (using the current estimates for camera parameters and the poses) object points\n    objectPoints. 
See projectPoints for details.\n\nThe function returns the final re-projection error.\n\n@note\n   If you use a non-square (=non-NxN) grid and findChessboardCorners for calibration, and\n    calibrateCamera returns bad values (zero distortion coefficients, an image center very far from\n    (w/2-0.5,h/2-0.5), and/or large differences between \\f$f_x\\f$ and \\f$f_y\\f$ (ratios of 10:1 or more)),\n    then you have probably used patternSize=cvSize(rows,cols) instead of using\n    patternSize=cvSize(cols,rows) in findChessboardCorners .\n\n@sa\n   findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort\n */\nCV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,\n                                     InputArrayOfArrays imagePoints, Size imageSize,\n                                     InputOutputArray cameraMatrix, InputOutputArray distCoeffs,\n                                     OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,\n                                     int flags = 0, TermCriteria criteria = TermCriteria(\n                                        TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );\n\n/** @brief Computes useful camera characteristics from the camera matrix.\n\n@param cameraMatrix Input camera matrix that can be estimated by calibrateCamera or\nstereoCalibrate .\n@param imageSize Input image size in pixels.\n@param apertureWidth Physical width in mm of the sensor.\n@param apertureHeight Physical height in mm of the sensor.\n@param fovx Output field of view in degrees along the horizontal sensor axis.\n@param fovy Output field of view in degrees along the vertical sensor axis.\n@param focalLength Focal length of the lens in mm.\n@param principalPoint Principal point in mm.\n@param aspectRatio \\f$f_y/f_x\\f$\n\nThe function computes various useful camera characteristics from the previously estimated camera\nmatrix.\n\n@note\n   Do keep in mind that the unit of measure 'mm' stands for whatever unit of measure one chooses for\n    the chessboard pitch (it can thus be any value).\n */\nCV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,\n                                           double apertureWidth, double apertureHeight,\n                                           CV_OUT double& fovx, CV_OUT double& fovy,\n                                           CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,\n                                           CV_OUT double& aspectRatio );\n\n/** @brief Calibrates the stereo camera.\n\n@param objectPoints Vector of vectors of the calibration pattern points.\n@param imagePoints1 Vector of vectors of the projections of the calibration pattern points,\nobserved by the first camera.\n@param imagePoints2 Vector of vectors of the projections of the calibration pattern points,\nobserved by the second camera.\n@param cameraMatrix1 Input/output first camera matrix:\n\\f$\\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\\f$ , \\f$j = 0,\\, 1\\f$ . If\nany of CV_CALIB_USE_INTRINSIC_GUESS , CV_CALIB_FIX_ASPECT_RATIO ,\nCV_CALIB_FIX_INTRINSIC , or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the\nmatrix components must be initialized. See the flags description for details.\n@param distCoeffs1 Input/output vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$ of\n4, 5, 8, 12 or 14 elements. 
The output vector length depends on the flags.\n@param cameraMatrix2 Input/output second camera matrix. The parameter is similar to cameraMatrix1 .\n@param distCoeffs2 Input/output lens distortion coefficients for the second camera. The parameter\nis similar to distCoeffs1 .\n@param imageSize Size of the image used only to initialize the intrinsic camera matrix.\n@param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.\n@param T Output translation vector between the coordinate systems of the cameras.\n@param E Output essential matrix.\n@param F Output fundamental matrix.\n@param flags Different flags that may be zero or a combination of the following values:\n-   **CV_CALIB_FIX_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F\nmatrices are estimated.\n-   **CV_CALIB_USE_INTRINSIC_GUESS** Optimize some or all of the intrinsic parameters\naccording to the specified flags. Initial values are provided by the user.\n-   **CV_CALIB_FIX_PRINCIPAL_POINT** Fix the principal points during the optimization.\n-   **CV_CALIB_FIX_FOCAL_LENGTH** Fix \\f$f^{(j)}_x\\f$ and \\f$f^{(j)}_y\\f$ .\n-   **CV_CALIB_FIX_ASPECT_RATIO** Optimize \\f$f^{(j)}_y\\f$ . Fix the ratio \\f$f^{(j)}_x/f^{(j)}_y\\f$.\n-   **CV_CALIB_SAME_FOCAL_LENGTH** Enforce \\f$f^{(0)}_x=f^{(1)}_x\\f$ and \\f$f^{(0)}_y=f^{(1)}_y\\f$ .\n-   **CV_CALIB_ZERO_TANGENT_DIST** Set tangential distortion coefficients for each camera to\nzeros and keep them fixed.\n-   **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** Do not change the corresponding radial\ndistortion coefficient during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set,\nthe coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.\n-   **CV_CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward\ncompatibility, this extra flag should be explicitly specified to make the calibration\nfunction use the rational model and return 8 coefficients. If the flag is not set, the\nfunction computes and returns only 5 distortion coefficients.\n-   **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the\nbackward compatibility, this extra flag should be explicitly specified to make the\ncalibration function use the thin prism model and return 12 coefficients. If the flag is not\nset, the function computes and returns only 5 distortion coefficients.\n-   **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during\nthe optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the\nsupplied distCoeffs matrix is used. Otherwise, it is set to 0.\n-   **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the\nbackward compatibility, this extra flag should be explicitly specified to make the\ncalibration function use the tilted sensor model and return 14 coefficients. If the flag is not\nset, the function computes and returns only 5 distortion coefficients.\n-   **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during\nthe optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the\nsupplied distCoeffs matrix is used. Otherwise, it is set to 0.\n@param criteria Termination criteria for the iterative optimization algorithm.\n\nThe function estimates the transformation between two cameras making a stereo pair. 
If you have a stereo\ncamera where the relative position and orientation of the two cameras is fixed, and if you computed\nposes of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2),\nrespectively (this can be done with solvePnP ), then those poses definitely relate to each other.\nThis means that, given ( \\f$R_1\\f$,\\f$T_1\\f$ ), it should be possible to compute ( \\f$R_2\\f$,\\f$T_2\\f$ ). You only\nneed to know the position and orientation of the second camera relative to the first camera. This is\nwhat the described function does. It computes ( \\f$R\\f$,\\f$T\\f$ ) so that:\n\n\\f[R_2=R*R_1\\f]\n\\f[T_2=R*T_1 + T,\\f]\n\nOptionally, it computes the essential matrix E:\n\n\\f[E= \\vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} *R\\f]\n\nwhere \\f$T_i\\f$ are components of the translation vector \\f$T\\f$ : \\f$T=[T_0, T_1, T_2]^T\\f$ . And the function\ncan also compute the fundamental matrix F:\n\n\\f[F = cameraMatrix2^{-T} E cameraMatrix1^{-1}\\f]\n\nBesides the stereo-related information, the function can also perform a full calibration of each of\ntwo cameras. However, due to the high dimensionality of the parameter space and noise in the input\ndata, the function can diverge from the correct solution. If the intrinsic parameters can be\nestimated with high accuracy for each of the cameras individually (for example, using\ncalibrateCamera ), it is recommended to do that and then pass the CV_CALIB_FIX_INTRINSIC flag to the\nfunction along with the computed intrinsic parameters. Otherwise, if all the parameters are\nestimated at once, it makes sense to restrict some parameters, for example, pass\nCV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST flags, which is usually a\nreasonable assumption.\n\nSimilarly to calibrateCamera , the function minimizes the total re-projection error for all the\npoints in all the available views from both cameras. 
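\n\nA minimal calling sketch, assuming the per-view point sets have already been collected (for\nexample with findChessboardCorners) and each camera was calibrated individually beforehand; the\nvariable names below are illustrative only:\n@code\n    // objectPoints             : vector<vector<Point3f> >, filled elsewhere\n    // imagePoints1, imagePoints2: vector<vector<Point2f> >, filled elsewhere\n    // K1, D1, K2, D2           : per-camera intrinsics obtained from calibrateCamera\n    // imageSize                : size of the calibration images\n    Mat R, T, E, F;\n    double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2,\n                                 K1, D1, K2, D2, imageSize,\n                                 R, T, E, F, CALIB_FIX_INTRINSIC);\n@endcode\n\n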
The function returns the final value of the\nre-projection error.\n */\nCV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,\n                                     InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,\n                                     InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,\n                                     InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,\n                                     Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F,\n                                     int flags = CALIB_FIX_INTRINSIC,\n                                     TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );\n\n\n/** @brief Computes rectification transforms for each head of a calibrated stereo camera.\n\n@param cameraMatrix1 First camera matrix.\n@param distCoeffs1 First camera distortion parameters.\n@param cameraMatrix2 Second camera matrix.\n@param distCoeffs2 Second camera distortion parameters.\n@param imageSize Size of the image used for stereo calibration.\n@param R Rotation matrix between the coordinate systems of the first and the second cameras.\n@param T Translation vector between coordinate systems of the cameras.\n@param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.\n@param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.\n@param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first\ncamera.\n@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second\ncamera.\n@param Q Output \\f$4 \\times 4\\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).\n@param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,\nthe function makes the principal points of each camera have the same pixel coordinates in the\nrectified views. If the flag is not set, the function may still shift the images in the\nhorizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the\nuseful image area.\n@param alpha Free scaling parameter. If it is -1 or absent, the function performs the default\nscaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified\nimages are zoomed and shifted so that only valid pixels are visible (no black areas after\nrectification). alpha=1 means that the rectified image is decimated and shifted so that all the\npixels from the original images from the cameras are retained in the rectified images (no source\nimage pixels are lost). Obviously, any intermediate value yields an intermediate result between\nthose two extreme cases.\n@param newImageSize New image resolution after rectification. The same size should be passed to\ninitUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)\nis passed (default), it is set to the original imageSize . Setting it to a larger value can help you\npreserve details in the original image, especially when there is a big radial distortion.\n@param validPixROI1 Optional output rectangles inside the rectified images where all the pixels\nare valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller\n(see the picture below).\n@param validPixROI2 Optional output rectangles inside the rectified images where all the pixels\nare valid. If alpha=0 , the ROIs cover the whole images. 
Otherwise, they are likely to be smaller\n(see the picture below).\n\nThe function computes the rotation matrices for each camera that (virtually) make both camera image\nplanes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies\nthe dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate\nas input. As output, it provides two rotation matrices and also two projection matrices in the new\ncoordinates. The function distinguishes the following two cases:\n\n-   **Horizontal stereo**: the first and the second camera views are shifted relative to each other\n    mainly along the x axis (with a possible small vertical shift). In the rectified images, the\n    corresponding epipolar lines in the left and right cameras are horizontal and have the same\n    y-coordinate. P1 and P2 look like:\n\n    \\f[\\texttt{P1} = \\begin{bmatrix} f & 0 & cx_1 & 0 \\\\ 0 & f & cy & 0 \\\\ 0 & 0 & 1 & 0 \\end{bmatrix}\\f]\n\n    \\f[\\texttt{P2} = \\begin{bmatrix} f & 0 & cx_2 & T_x*f \\\\ 0 & f & cy & 0 \\\\ 0 & 0 & 1 & 0 \\end{bmatrix} ,\\f]\n\n    where \\f$T_x\\f$ is a horizontal shift between the cameras and \\f$cx_1=cx_2\\f$ if\n    CV_CALIB_ZERO_DISPARITY is set.\n\n-   **Vertical stereo**: the first and the second camera views are shifted relative to each other\n    mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar\n    lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:\n\n    \\f[\\texttt{P1} = \\begin{bmatrix} f & 0 & cx & 0 \\\\ 0 & f & cy_1 & 0 \\\\ 0 & 0 & 1 & 0 \\end{bmatrix}\\f]\n\n    \\f[\\texttt{P2} = \\begin{bmatrix} f & 0 & cx & 0 \\\\ 0 & f & cy_2 & T_y*f \\\\ 0 & 0 & 1 & 0 \\end{bmatrix} ,\\f]\n\n    where \\f$T_y\\f$ is a vertical shift between the cameras and \\f$cy_1=cy_2\\f$ if CALIB_ZERO_DISPARITY is\n    set.\n\nAs you can see, the first three columns of P1 and P2 will effectively be the new \"rectified\" camera\nmatrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to\ninitialize the rectification map for each camera.\n\nSee below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through\nthe corresponding image regions. This means that the images are well rectified, which is what most\nstereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that\ntheir interiors are all valid pixels.\n\n![image](pics/stereo_undistort.jpg)\n */\nCV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,\n                                 InputArray cameraMatrix2, InputArray distCoeffs2,\n                                 Size imageSize, InputArray R, InputArray T,\n                                 OutputArray R1, OutputArray R2,\n                                 OutputArray P1, OutputArray P2,\n                                 OutputArray Q, int flags = CALIB_ZERO_DISPARITY,\n                                 double alpha = -1, Size newImageSize = Size(),\n                                 CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );\n\n/** @brief Computes a rectification transform for an uncalibrated stereo camera.\n\n@param points1 Array of feature points in the first image.\n@param points2 The corresponding points in the second image. The same formats as in\nfindFundamentalMat are supported.\n@param F Input fundamental matrix. 
It can be computed from the same set of point pairs using\nfindFundamentalMat .\n@param imgSize Size of the image.\n@param H1 Output rectification homography matrix for the first image.\n@param H2 Output rectification homography matrix for the second image.\n@param threshold Optional threshold used to filter out the outliers. If the parameter is greater\nthan zero, all the point pairs that do not comply with the epipolar geometry (that is, the points\nfor which \\f$|\\texttt{points2[i]}^T*\\texttt{F}*\\texttt{points1[i]}|>\\texttt{threshold}\\f$ ) are\nrejected prior to computing the homographies. Otherwise, all the points are considered inliers.\n\nThe function computes the rectification transformations without knowing the intrinsic parameters of the\ncameras and their relative position in space, which explains the suffix \"uncalibrated\". Another\nrelated difference from stereoRectify is that the function outputs not the rectification\ntransformations in the object (3D) space, but the planar perspective transformations encoded by the\nhomography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 .\n\n@note\n    While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily\n    depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,\n    it would be better to correct it before computing the fundamental matrix and calling this\n    function. For example, distortion coefficients can be estimated for each head of a stereo camera\n    separately by using calibrateCamera . Then, the images can be corrected using undistort , or\n    just the point coordinates can be corrected with undistortPoints .\n */\nCV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,\n                                             InputArray F, Size imgSize,\n                                             OutputArray H1, OutputArray H2,\n                                             double threshold = 5 );\n\n//! computes the rectification transformations for a 3-head camera, where all the heads are on the same line.\nCV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,\n                                      InputArray cameraMatrix2, InputArray distCoeffs2,\n                                      InputArray cameraMatrix3, InputArray distCoeffs3,\n                                      InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,\n                                      Size imageSize, InputArray R12, InputArray T12,\n                                      InputArray R13, InputArray T13,\n                                      OutputArray R1, OutputArray R2, OutputArray R3,\n                                      OutputArray P1, OutputArray P2, OutputArray P3,\n                                      OutputArray Q, double alpha, Size newImgSize,\n                                      CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );\n\n/** @brief Returns the new camera matrix based on the free scaling parameter.\n\n@param cameraMatrix Input camera matrix.\n@param distCoeffs Input vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$ of\n4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are\nassumed.\n@param imageSize Original image size.\n@param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are\nvalid) and 1 (when all the source image pixels are retained in the undistorted image). See\nstereoRectify for details.\n@param newImgSize Image size after rectification. By default, it is set to imageSize .\n@param validPixROI Optional output rectangle that outlines the all-good-pixels region in the\nundistorted image. See the roi1, roi2 description in stereoRectify .\n@param centerPrincipalPoint Optional flag that indicates whether in the new camera matrix the\nprincipal point should be at the image center or not. By default, the principal point is chosen to\nbest fit a subset of the source image (determined by alpha) to the corrected image.\n@return new_camera_matrix Output new camera matrix.\n\nThe function computes and returns the optimal new camera matrix based on the free scaling parameter.\nBy varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original\nimage pixels if there is valuable information in the corners (alpha=1), or get something in between.\nWhen alpha\\>0 , the undistortion result is likely to have some black pixels corresponding to\n\"virtual\" pixels outside of the captured distorted image. The original camera matrix, distortion\ncoefficients, the computed new camera matrix, and newImageSize should be passed to\ninitUndistortRectifyMap to produce the maps for remap .\n
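\nA minimal sketch of that pipeline (an illustration, not part of the original documentation; K, distCoeffs, imgSize, and src are assumed to come from an earlier calibration and capture):\n@code\n    // Keep all source pixels (alpha=1), then crop to the all-good-pixels ROI.\n    Rect roi;\n    Mat newK = getOptimalNewCameraMatrix(K, distCoeffs, imgSize, 1.0, imgSize, &roi);\n    Mat map1, map2, undistorted;\n    initUndistortRectifyMap(K, distCoeffs, Mat(), newK, imgSize, CV_16SC2, map1, map2);\n    remap(src, undistorted, map1, map2, INTER_LINEAR);\n    undistorted = undistorted(roi);\n@endcode\n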
 */\nCV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,\n                                            Size imageSize, double alpha, Size newImgSize = Size(),\n                                            CV_OUT Rect* validPixROI = 0,\n                                            bool centerPrincipalPoint = false);\n\n/** @brief Converts points from Euclidean to homogeneous space.\n\n@param src Input vector of N-dimensional points.\n@param dst Output vector of N+1-dimensional points.\n\nThe function converts points from Euclidean to homogeneous space by appending 1's to the tuple of\npoint coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).\n */\nCV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );\n\n/** @brief Converts points from homogeneous to Euclidean space.\n\n@param src Input vector of N-dimensional points.\n@param dst Output vector of N-1-dimensional points.\n\nThe function converts points from homogeneous to Euclidean space using perspective projection. That is,\neach point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the\noutput point coordinates will be (0,0,0,...).\n */\nCV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );\n\n/** @brief Converts points to/from homogeneous coordinates.\n\n@param src Input array or vector of 2D, 3D, or 4D points.\n@param dst Output vector of 2D, 3D, or 4D points.\n\nThe function converts 2D or 3D points from/to homogeneous coordinates by calling either\nconvertPointsToHomogeneous or convertPointsFromHomogeneous.\n\n@note The function is obsolete. Use one of the previous two functions instead.\n */\nCV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );\n\n/** @brief Calculates a fundamental matrix from the corresponding points in two images.\n\n@param points1 Array of N points from the first image. 
The point coordinates should be\nfloating-point (single or double precision).\n@param points2 Array of the second image points of the same size and format as points1 .\n@param method Method for computing a fundamental matrix.\n-   **CV_FM_7POINT** for a 7-point algorithm. \\f$N = 7\\f$\n-   **CV_FM_8POINT** for an 8-point algorithm. \\f$N \\ge 8\\f$\n-   **CV_FM_RANSAC** for the RANSAC algorithm. \\f$N \\ge 8\\f$\n-   **CV_FM_LMEDS** for the LMedS algorithm. \\f$N \\ge 8\\f$\n@param param1 Parameter used for RANSAC. It is the maximum distance from a point to an epipolar\nline in pixels, beyond which the point is considered an outlier and is not used for computing the\nfinal fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the\npoint localization, image resolution, and the image noise.\n@param param2 Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level\nof confidence (probability) that the estimated matrix is correct.\n@param mask Optional output mask of inlier points, computed only by the robust (RANSAC and LMedS)\nmethods.\n\nThe epipolar geometry is described by the following equation:\n\n\\f[[p_2; 1]^T F [p_1; 1] = 0\\f]\n\nwhere \\f$F\\f$ is a fundamental matrix, \\f$p_1\\f$ and \\f$p_2\\f$ are corresponding points in the first and the\nsecond images, respectively.\n\nThe function calculates the fundamental matrix using one of four methods listed above and returns\nthe found fundamental matrix. Normally just one matrix is found. But in case of the 7-point\nalgorithm, the function may return up to 3 solutions ( \\f$9 \\times 3\\f$ matrix that stores all 3\nmatrices sequentially).\n\nThe calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the\nepipolar lines corresponding to the specified points. It can also be passed to\nstereoRectifyUncalibrated to compute the rectification transformation. :\n@code\n    // Example. Estimation of the fundamental matrix using the RANSAC algorithm\n    int point_count = 100;\n    vector<Point2f> points1(point_count);\n    vector<Point2f> points2(point_count);\n\n    // initialize the points here ...\n    for( int i = 0; i < point_count; i++ )\n    {\n        points1[i] = ...;\n        points2[i] = ...;\n    }\n\n    Mat fundamental_matrix =\n     findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);\n@endcode\n */\nCV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,\n                                     int method = FM_RANSAC,\n                                     double param1 = 3., double param2 = 0.99,\n                                     OutputArray mask = noArray() );\n\n/** @overload */\nCV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,\n                                   OutputArray mask, int method = FM_RANSAC,\n                                   double param1 = 3., double param2 = 0.99 );\n\n/** @brief Calculates an essential matrix from the corresponding points in two images.\n\n@param points1 Array of N (N \\>= 5) 2D points from the first image. 
The point coordinates should\nbe floating-point (single or double precision).\n@param points2 Array of the second image points of the same size and format as points1 .\n@param cameraMatrix Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ .\nNote that this function assumes that points1 and points2 are feature points from cameras with the\nsame camera matrix.\n@param method Method for computing an essential matrix.\n-   **RANSAC** for the RANSAC algorithm.\n-   **LMEDS** for the LMedS algorithm.\n@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar\nline in pixels, beyond which the point is considered an outlier and is not used for computing the\nfinal essential matrix. It can be set to something like 1-3, depending on the accuracy of the\npoint localization, image resolution, and the image noise.\n@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of\nconfidence (probability) that the estimated matrix is correct.\n@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1\nfor the other points. The array is computed only in the RANSAC and LMedS methods.\n\nThis function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .\n@cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:\n\n\\f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\\f]\n\nwhere \\f$E\\f$ is an essential matrix, \\f$p_1\\f$ and \\f$p_2\\f$ are corresponding points in the first and the\nsecond images, respectively. The result of this function may be passed further to\ndecomposeEssentialMat or recoverPose to recover the relative pose between cameras.\n */\nCV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,\n                                 InputArray cameraMatrix, int method = RANSAC,\n                                 double prob = 0.999, double threshold = 1.0,\n                                 OutputArray mask = noArray() );\n\n/** @overload\n@param points1 Array of N (N \\>= 5) 2D points from the first image. The point coordinates should\nbe floating-point (single or double precision).\n@param points2 Array of the second image points of the same size and format as points1 .\n@param focal focal length of the camera. Note that this function assumes that points1 and points2\nare feature points from cameras with the same focal length and principal point.\n@param pp principal point of the camera.\n@param method Method for computing an essential matrix.\n-   **RANSAC** for the RANSAC algorithm.\n-   **LMEDS** for the LMedS algorithm.\n@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar\nline in pixels, beyond which the point is considered an outlier and is not used for computing the\nfinal essential matrix. It can be set to something like 1-3, depending on the accuracy of the\npoint localization, image resolution, and the image noise.\n@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of\nconfidence (probability) that the estimated matrix is correct.\n@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1\nfor the other points. 
The array is computed only in the RANSAC and LMedS methods.\n\nThis function differs from the one above in that it computes the camera matrix from the focal length and\nprincipal point:\n\n\\f[K =\n\\begin{bmatrix}\nf & 0 & x_{pp}  \\\\\n0 & f & y_{pp}  \\\\\n0 & 0 & 1\n\\end{bmatrix}\\f]\n */\nCV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,\n                                 double focal = 1.0, Point2d pp = Point2d(0, 0),\n                                 int method = RANSAC, double prob = 0.999,\n                                 double threshold = 1.0, OutputArray mask = noArray() );\n\n/** @brief Decompose an essential matrix to possible rotations and translation.\n\n@param E The input essential matrix.\n@param R1 One possible rotation matrix.\n@param R2 Another possible rotation matrix.\n@param t One possible translation.\n\nThis function decomposes an essential matrix E using SVD decomposition @cite HartleyZ00 . In general, four\npossible poses exist for a given E. They are \\f$[R_1, t]\\f$, \\f$[R_1, -t]\\f$, \\f$[R_2, t]\\f$, \\f$[R_2, -t]\\f$. By\ndecomposing E, you can only get the direction of the translation, so the function returns unit t.\n */\nCV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );\n\n/** @brief Recover relative camera rotation and translation from an estimated essential matrix and the\ncorresponding points in two images, using the cheirality check. Returns the number of inliers which pass\nthe check.\n\n@param E The input essential matrix.\n@param points1 Array of N 2D points from the first image. The point coordinates should be\nfloating-point (single or double precision).\n@param points2 Array of the second image points of the same size and format as points1 .\n@param cameraMatrix Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ .\nNote that this function assumes that points1 and points2 are feature points from cameras with the\nsame camera matrix.\n@param R Recovered relative rotation.\n@param t Recovered relative translation.\n@param mask Input/output mask for inliers in points1 and points2.\n:   If it is not empty, then it marks inliers in points1 and points2 for the given essential\nmatrix E. Only these inliers will be used to recover the pose. In the output mask, only the inliers\nwhich pass the cheirality check are kept.\n\nThis function decomposes an essential matrix using decomposeEssentialMat and then verifies possible\npose hypotheses by doing the cheirality check. The cheirality check basically means that the\ntriangulated 3D points should have positive depth. Some details can be found in @cite Nister03 .\n\nThis function can be used to process the output E and mask from findEssentialMat. In this scenario,\npoints1 and points2 are the same input for findEssentialMat. :\n@code\n    // Example. 
Estimation of the essential matrix and relative camera pose using the RANSAC algorithm\n    int point_count = 100;\n    vector<Point2f> points1(point_count);\n    vector<Point2f> points2(point_count);\n\n    // initialize the points here ...\n    for( int i = 0; i < point_count; i++ )\n    {\n        points1[i] = ...;\n        points2[i] = ...;\n    }\n\n    // camera matrix with both focal lengths = 1, and principal point = (0, 0)\n    Mat cameraMatrix = Mat::eye(3, 3, CV_64F);\n\n    Mat E, R, t, mask;\n\n    E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);\n    recoverPose(E, points1, points2, cameraMatrix, R, t, mask);\n@endcode\n */\nCV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,\n                            InputArray cameraMatrix, OutputArray R, OutputArray t,\n                            InputOutputArray mask = noArray() );\n\n/** @overload\n@param E The input essential matrix.\n@param points1 Array of N 2D points from the first image. The point coordinates should be\nfloating-point (single or double precision).\n@param points2 Array of the second image points of the same size and format as points1 .\n@param R Recovered relative rotation.\n@param t Recovered relative translation.\n@param focal Focal length of the camera. Note that this function assumes that points1 and points2\nare feature points from cameras with the same focal length and principal point.\n@param pp Principal point of the camera.\n@param mask Input/output mask for inliers in points1 and points2.\n:   If it is not empty, then it marks inliers in points1 and points2 for the given essential\nmatrix E. Only these inliers will be used to recover the pose. In the output mask, only the inliers\nwhich pass the cheirality check are kept.\n\nThis function differs from the one above in that it computes the camera matrix from the focal length and\nprincipal point:\n\n\\f[K =\n\\begin{bmatrix}\nf & 0 & x_{pp}  \\\\\n0 & f & y_{pp}  \\\\\n0 & 0 & 1\n\\end{bmatrix}\\f]\n */\nCV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,\n                            OutputArray R, OutputArray t,\n                            double focal = 1.0, Point2d pp = Point2d(0, 0),\n                            InputOutputArray mask = noArray() );\n\n/** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.\n\n@param points Input points. \\f$N \\times 1\\f$ or \\f$1 \\times N\\f$ matrix of type CV_32FC2 or\nvector\\<Point2f\\> .\n@param whichImage Index of the image (1 or 2) that contains the points .\n@param F Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify .\n@param lines Output vector of the epipolar lines corresponding to the points in the other image.\nEach line \\f$ax + by + c=0\\f$ is encoded by 3 numbers \\f$(a, b, c)\\f$ .\n\nFor every point in one of the two images of a stereo pair, the function finds the equation of the\ncorresponding epipolar line in the other image.\n\nFrom the fundamental matrix definition (see findFundamentalMat ), line \\f$l^{(2)}_i\\f$ in the second\nimage for the point \\f$p^{(1)}_i\\f$ in the first image (when whichImage=1 ) is computed as:\n\n\\f[l^{(2)}_i = F p^{(1)}_i\\f]\n\nAnd vice versa, when whichImage=2, \\f$l^{(1)}_i\\f$ is computed from \\f$p^{(2)}_i\\f$ as:\n\n\\f[l^{(1)}_i = F^T p^{(2)}_i\\f]\n\nLine coefficients are defined up to a scale. They are normalized so that \\f$a_i^2+b_i^2=1\\f$ .\n
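\nA brief illustrative sketch (not from the original documentation; the matched point sets are hypothetical):\n@code\n    // Epipolar lines in the second image for points detected in the first image.\n    vector<Point2f> pts1, pts2;\n    // ... fill pts1 and pts2 with matched points ...\n    Mat F = findFundamentalMat(pts1, pts2, FM_RANSAC, 3., 0.99);\n    vector<Vec3f> lines2; // each line is (a, b, c) with ax + by + c = 0\n    computeCorrespondEpilines(pts1, 1, F, lines2);\n@endcode\n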
 */\nCV_EXPORTS_W void computeCorrespondEpilines( InputArray points, int whichImage,\n                                             InputArray F, OutputArray lines );\n\n/** @brief Reconstructs points by triangulation.\n\n@param projMatr1 3x4 projection matrix of the first camera.\n@param projMatr2 3x4 projection matrix of the second camera.\n@param projPoints1 2xN array of feature points in the first image. In case of the C++ version, it can\nalso be a vector of feature points or a two-channel matrix of size 1xN or Nx1.\n@param projPoints2 2xN array of corresponding points in the second image. In case of the C++ version,\nit can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.\n@param points4D 4xN array of reconstructed points in homogeneous coordinates.\n\nThe function reconstructs 3-dimensional points (in homogeneous coordinates) by using their\nobservations with a stereo camera. Projection matrices can be obtained from stereoRectify.\n\n@note\n   Keep in mind that all input data should be of float type in order for this function to work.\n\n@sa\n   reprojectImageTo3D\n */\nCV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,\n                                     InputArray projPoints1, InputArray projPoints2,\n                                     OutputArray points4D );\n\n/** @brief Refines coordinates of corresponding points.\n\n@param F 3x3 fundamental matrix.\n@param points1 1xN array containing the first set of points.\n@param points2 1xN array containing the second set of points.\n@param newPoints1 The optimized points1.\n@param newPoints2 The optimized points2.\n\nThe function implements the Optimal Triangulation Method (see Multiple View Geometry for details).\nFor each given point correspondence points1[i] \\<-\\> points2[i], and a fundamental matrix F, it\ncomputes the corrected correspondences newPoints1[i] \\<-\\> newPoints2[i] that minimize the geometric\nerror \\f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\\f$ (where \\f$d(a,b)\\f$ is the\ngeometric distance between points \\f$a\\f$ and \\f$b\\f$ ) subject to the epipolar constraint\n\\f$newPoints2^T * F * newPoints1 = 0\\f$ .\n */\nCV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,\n                                  OutputArray newPoints1, OutputArray newPoints2 );\n\n/** @brief Filters off small noise blobs (speckles) in the disparity map\n\n@param img The input 16-bit signed disparity image\n@param newVal The disparity value used to paint off the speckles\n@param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not\naffected by the algorithm\n@param maxDiff Maximum difference between neighbor disparity pixels to put them into the same\nblob. Note that since StereoBM, StereoSGBM, and possibly other algorithms return a fixed-point\ndisparity map, where disparity values are multiplied by 16, this scale factor should be taken into\naccount when specifying this parameter value.\n@param buf The optional temporary buffer to avoid memory allocation within the function.\n
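\nA hedged usage sketch (illustrative only; disp is assumed to be a CV_16S disparity map produced by StereoBM or StereoSGBM, with disparities scaled by 16):\n@code\n    // Invalidate speckles smaller than 200 pixels; neighboring pixels within\n    // 2 disparities (2*16 = 32 in fixed-point units) belong to the same blob.\n    filterSpeckles(disp, 0, 200, 2*16);\n@endcode\n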
 */\nCV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,\n                                  int maxSpeckleSize, double maxDiff,\n                                  InputOutputArray buf = noArray() );\n\n//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())\nCV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,\n                                        int minDisparity, int numberOfDisparities,\n                                        int SADWindowSize );\n\n//! validates disparity using the left-right check. The matrix \"cost\" should be computed by the stereo correspondence algorithm\nCV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,\n                                     int minDisparity, int numberOfDisparities,\n                                     int disp12MaxDisp = 1 );\n\n/** @brief Reprojects a disparity image to 3D space.\n\n@param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit\nfloating-point disparity image. If 16-bit signed format is used, the values are assumed to have no\nfractional bits.\n@param _3dImage Output 3-channel floating-point image of the same size as disparity . Each\nelement of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity\nmap.\n@param Q \\f$4 \\times 4\\f$ perspective transformation matrix that can be obtained with stereoRectify.\n@param handleMissingValues Indicates whether the function should handle missing values (i.e.\npoints where the disparity was not computed). If handleMissingValues=true, then pixels with the\nminimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed\nto 3D points with a very large Z value (currently set to 10000).\n@param ddepth The optional output array depth. If it is -1, the output image will have CV_32F\ndepth. ddepth can also be set to CV_16S, CV_32S or CV_32F.\n\nThe function transforms a single-channel disparity map to a 3-channel image representing a 3D\nsurface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it\ncomputes:\n\n\\f[\\begin{array}{l} [X \\; Y \\; Z \\; W]^T =  \\texttt{Q} *[x \\; y \\; \\texttt{disparity} (x,y) \\; 1]^T  \\\\ \\texttt{\\_3dImage} (x,y) = (X/W, \\; Y/W, \\; Z/W) \\end{array}\\f]\n\nThe matrix Q can be an arbitrary \\f$4 \\times 4\\f$ matrix (for example, the one computed by\nstereoRectify). To reproject a sparse set of points {(x,y,d),...} to 3D space, use\nperspectiveTransform .\n */\nCV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,\n                                      OutputArray _3dImage, InputArray Q,\n                                      bool handleMissingValues = false,\n                                      int ddepth = -1 );\n\n/** @brief Calculates the Sampson Distance between two points.\n\nThe function sampsonDistance calculates and returns the first order approximation of the geometric error as:\n\\f[sd( \\texttt{pt1} , \\texttt{pt2} )= \\frac{(\\texttt{pt2}^t \\cdot \\texttt{F} \\cdot \\texttt{pt1})^2}{((\\texttt{F} \\cdot \\texttt{pt1})(0))^2 + ((\\texttt{F} \\cdot \\texttt{pt1})(1))^2 + ((\\texttt{F}^t \\cdot \\texttt{pt2})(0))^2 + ((\\texttt{F}^t \\cdot \\texttt{pt2})(1))^2}\\f]\nThe fundamental matrix may be calculated using the cv::findFundamentalMat function. See HZ 11.4.3 for details.\n
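\nFor illustration only (not from the original documentation; F is assumed to be a CV_64F fundamental matrix obtained from findFundamentalMat):\n@code\n    // Homogeneous image points (x, y, 1).\n    Mat pt1 = (Mat_<double>(3, 1) << 11.0, 22.0, 1.0);\n    Mat pt2 = (Mat_<double>(3, 1) << 13.0, 21.5, 1.0);\n    double err = sampsonDistance(pt1, pt2, F);\n@endcode\n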
@param pt1 first homogeneous 2d point\n@param pt2 second homogeneous 2d point\n@param F fundamental matrix\n*/\nCV_EXPORTS_W double sampsonDistance(InputArray pt1, InputArray pt2, InputArray F);\n\n/** @brief Computes an optimal affine transformation between two 3D point sets.\n\n@param src First input 3D point set.\n@param dst Second input 3D point set.\n@param out Output 3D affine transformation matrix \\f$3 \\times 4\\f$ .\n@param inliers Output vector indicating which points are inliers.\n@param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as\nan inlier.\n@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything\nbetween 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation\nsignificantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.\n\nThe function estimates an optimal 3D affine transformation between two 3D point sets using the\nRANSAC algorithm.\n */\nCV_EXPORTS_W  int estimateAffine3D(InputArray src, InputArray dst,\n                                   OutputArray out, OutputArray inliers,\n                                   double ransacThreshold = 3, double confidence = 0.99);\n\n/** @brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).\n\n@param H The input homography matrix between two images.\n@param K The input intrinsic camera calibration matrix.\n@param rotations Array of rotation matrices.\n@param translations Array of translation matrices.\n@param normals Array of plane normal matrices.\n\nThis function extracts relative camera motion between two views observing a planar object from the\nhomography H induced by the plane. The intrinsic camera matrix K must also be provided. The function\nmay return up to four mathematical solution sets. At least two of the solutions may further be\ninvalidated if point correspondences are available by applying the positive depth constraint (all points\nmust be in front of the camera). The decomposition method is described in detail in @cite Malis .\n */\nCV_EXPORTS_W int decomposeHomographyMat(InputArray H,\n                                        InputArray K,\n                                        OutputArrayOfArrays rotations,\n                                        OutputArrayOfArrays translations,\n                                        OutputArrayOfArrays normals);\n\n/** @brief The base class for stereo correspondence algorithms.\n */\nclass CV_EXPORTS_W StereoMatcher : public Algorithm\n{\npublic:\n    enum { DISP_SHIFT = 4,\n           DISP_SCALE = (1 << DISP_SHIFT)\n         };\n\n    /** @brief Computes disparity map for the specified stereo pair\n\n    @param left Left 8-bit single-channel image.\n    @param right Right image of the same size and the same type as the left one.\n    @param disparity Output disparity map. It has the same size as the input images. Some algorithms,\n    like StereoBM or StereoSGBM, compute a 16-bit fixed-point disparity map (where each disparity value\n    has 4 fractional bits), whereas other algorithms output a 32-bit floating-point disparity map.\n
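\n    A small sketch (illustrative, assuming rectified 8-bit grayscale images left and right):\n    @code\n        Ptr<StereoBM> matcher = StereoBM::create(64, 21); // numDisparities, blockSize\n        Mat disp16, disp32;\n        matcher->compute(left, right, disp16);            // CV_16S, scaled by DISP_SCALE = 16\n        disp16.convertTo(disp32, CV_32F, 1.0/StereoMatcher::DISP_SCALE);\n    @endcode\n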
     */\n    CV_WRAP virtual void compute( InputArray left, InputArray right,\n                                  OutputArray disparity ) = 0;\n\n    CV_WRAP virtual int getMinDisparity() const = 0;\n    CV_WRAP virtual void setMinDisparity(int minDisparity) = 0;\n\n    CV_WRAP virtual int getNumDisparities() const = 0;\n    CV_WRAP virtual void setNumDisparities(int numDisparities) = 0;\n\n    CV_WRAP virtual int getBlockSize() const = 0;\n    CV_WRAP virtual void setBlockSize(int blockSize) = 0;\n\n    CV_WRAP virtual int getSpeckleWindowSize() const = 0;\n    CV_WRAP virtual void setSpeckleWindowSize(int speckleWindowSize) = 0;\n\n    CV_WRAP virtual int getSpeckleRange() const = 0;\n    CV_WRAP virtual void setSpeckleRange(int speckleRange) = 0;\n\n    CV_WRAP virtual int getDisp12MaxDiff() const = 0;\n    CV_WRAP virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0;\n};\n\n\n/** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and\ncontributed to OpenCV by K. Konolige.\n */\nclass CV_EXPORTS_W StereoBM : public StereoMatcher\n{\npublic:\n    enum { PREFILTER_NORMALIZED_RESPONSE = 0,\n           PREFILTER_XSOBEL              = 1\n         };\n\n    CV_WRAP virtual int getPreFilterType() const = 0;\n    CV_WRAP virtual void setPreFilterType(int preFilterType) = 0;\n\n    CV_WRAP virtual int getPreFilterSize() const = 0;\n    CV_WRAP virtual void setPreFilterSize(int preFilterSize) = 0;\n\n    CV_WRAP virtual int getPreFilterCap() const = 0;\n    CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;\n\n    CV_WRAP virtual int getTextureThreshold() const = 0;\n    CV_WRAP virtual void setTextureThreshold(int textureThreshold) = 0;\n\n    CV_WRAP virtual int getUniquenessRatio() const = 0;\n    CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;\n\n    CV_WRAP virtual int getSmallerBlockSize() const = 0;\n    CV_WRAP virtual void setSmallerBlockSize(int blockSize) = 0;\n\n    CV_WRAP virtual Rect getROI1() const = 0;\n    CV_WRAP virtual void setROI1(Rect roi1) = 0;\n\n    CV_WRAP virtual Rect getROI2() const = 0;\n    CV_WRAP virtual void setROI2(Rect roi2) = 0;\n\n    /** @brief Creates StereoBM object\n\n    @param numDisparities the disparity search range. For each pixel, the algorithm will find the best\n    disparity from 0 (default minimum disparity) to numDisparities. The search range can then be\n    shifted by changing the minimum disparity.\n    @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd\n    (as the block is centered at the current pixel). A larger block size implies a smoother, though less\n    accurate, disparity map. A smaller block size gives a more detailed disparity map, but there is a higher\n    chance for the algorithm to find a wrong correspondence.\n\n    The function creates a StereoBM object. You can then call StereoBM::compute() to compute disparity for\n    a specific stereo pair.\n     */\n    CV_WRAP static Ptr<StereoBM> create(int numDisparities = 0, int blockSize = 21);\n};\n\n/** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original\none as follows:\n\n-   By default, the algorithm is single-pass, which means that you consider only 5 directions\ninstead of 8. 
Set mode=StereoSGBM::MODE_HH in StereoSGBM::create to run the full variant of the\nalgorithm but beware that it may consume a lot of memory.\n-   The algorithm matches blocks, not individual pixels. However, setting blockSize=1 reduces the\nblocks to single pixels.\n-   The mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi\nsub-pixel metric from @cite BT98 is used. Color images are supported as well.\n-   Some pre- and post-processing steps from the K. Konolige algorithm StereoBM are included, for\nexample: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness\ncheck, quadratic interpolation and speckle filtering).\n\n@note\n   -   (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found\n        at opencv_source_code/samples/python/stereo_match.py\n */\nclass CV_EXPORTS_W StereoSGBM : public StereoMatcher\n{\npublic:\n    enum\n    {\n        MODE_SGBM = 0,\n        MODE_HH   = 1,\n        MODE_SGBM_3WAY = 2\n    };\n\n    CV_WRAP virtual int getPreFilterCap() const = 0;\n    CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;\n\n    CV_WRAP virtual int getUniquenessRatio() const = 0;\n    CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;\n\n    CV_WRAP virtual int getP1() const = 0;\n    CV_WRAP virtual void setP1(int P1) = 0;\n\n    CV_WRAP virtual int getP2() const = 0;\n    CV_WRAP virtual void setP2(int P2) = 0;\n\n    CV_WRAP virtual int getMode() const = 0;\n    CV_WRAP virtual void setMode(int mode) = 0;\n\n    /** @brief Creates StereoSGBM object\n\n    @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes\n    rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.\n    @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than\n    zero. In the current implementation, this parameter must be divisible by 16.\n    @param blockSize Matched block size. It must be an odd number \\>=1 . Normally, it should be\n    somewhere in the 3..11 range.\n    @param P1 The first parameter controlling the disparity smoothness. See below.\n    @param P2 The second parameter controlling the disparity smoothness. The larger the values are,\n    the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1\n    between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor\n    pixels. The algorithm requires P2 \\> P1 . See the stereo_match.cpp sample where some reasonably good\n    P1 and P2 values are shown (like 8\\*number_of_image_channels\\*SADWindowSize\\*SADWindowSize and\n    32\\*number_of_image_channels\\*SADWindowSize\\*SADWindowSize , respectively).\n    @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right\n    disparity check. Set it to a non-positive value to disable the check.\n    @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first\n    computes the x-derivative at each pixel and clips its value by the [-preFilterCap, preFilterCap] interval.\n    The result values are passed to the Birchfield-Tomasi pixel cost function.\n    @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function\n    value should \"win\" the second best value to consider the found match correct. 
Normally, a value\n    within the 5-15 range is good enough.\n    @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles\n    and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the\n    50-200 range.\n    @param speckleRange Maximum disparity variation within each connected component. If you do speckle\n    filtering, set the parameter to a positive value; it will be implicitly multiplied by 16.\n    Normally, 1 or 2 is good enough.\n    @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming\n    algorithm. It will consume O(W\\*H\\*numDisparities) bytes, which is large for 640x480 stereo and\n    huge for HD-size pictures. By default, it is set to MODE_SGBM.\n\n    The method initializes StereoSGBM with the specified parameters. So, you only have to set\n    StereoSGBM::numDisparities at minimum; every other parameter can be left at its default or set\n    to a custom value.\n     */\n    CV_WRAP static Ptr<StereoSGBM> create(int minDisparity, int numDisparities, int blockSize,\n                                          int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,\n                                          int preFilterCap = 0, int uniquenessRatio = 0,\n                                          int speckleWindowSize = 0, int speckleRange = 0,\n                                          int mode = StereoSGBM::MODE_SGBM);\n};\n\n//! @} calib3d\n\n/** @brief The methods in this namespace use a so-called fisheye camera model.\n  @ingroup calib3d_fisheye\n*/\nnamespace fisheye\n{\n//! @addtogroup calib3d_fisheye\n//! @{\n\n    enum{\n        CALIB_USE_INTRINSIC_GUESS   = 1,\n        CALIB_RECOMPUTE_EXTRINSIC   = 2,\n        CALIB_CHECK_COND            = 4,\n        CALIB_FIX_SKEW              = 8,\n        CALIB_FIX_K1                = 16,\n        CALIB_FIX_K2                = 32,\n        CALIB_FIX_K3                = 64,\n        CALIB_FIX_K4                = 128,\n        CALIB_FIX_INTRINSIC         = 256\n    };\n\n    /** @brief Projects points using fisheye model\n\n    @param objectPoints Array of object points, 1xN/Nx1 3-channel (or vector\\<Point3f\\> ), where N is\n    the number of points in the view.\n    @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or\n    vector\\<Point2f\\>.\n    @param affine\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$.\n    @param alpha The skew coefficient.\n    @param jacobian Optional output 2Nx15 jacobian matrix of derivatives of image points with respect\n    to components of the focal lengths, coordinates of the principal point, distortion coefficients,\n    rotation vector, translation vector, and the skew. In the old interface different components of\n    the jacobian are returned via different output parameters.\n\n    The function computes projections of 3D points to the image plane given intrinsic and extrinsic\n    camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of\n    image point coordinates (as functions of all the input parameters) with respect to the particular\n    parameters, intrinsic and/or extrinsic.\n
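\n    A brief sketch (illustrative only; rvec, tvec, K, and D are assumed to be given):\n    @code\n        std::vector<Point3d> objectPoints(1, Point3d(0.1, -0.2, 1.5));\n        std::vector<Point2d> imagePoints;\n        fisheye::projectPoints(objectPoints, imagePoints, rvec, tvec, K, D);\n    @endcode\n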
     */\n    CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,\n        InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());\n\n    /** @overload */\n    CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,\n        InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());\n\n    /** @brief Distorts 2D points using fisheye model.\n\n    @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\\<Point2f\\> ), where N is\n    the number of points in the view.\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$.\n    @param alpha The skew coefficient.\n    @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\\<Point2f\\> .\n     */\n    CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);\n\n    /** @brief Undistorts 2D points using fisheye model\n\n    @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\\<Point2f\\> ), where N is the\n    number of points in the view.\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$.\n    @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3\n    1-channel or 1x1 3-channel\n    @param P New camera matrix (3x3) or new projection matrix (3x4)\n    @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\\<Point2f\\> .\n     */\n    CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted,\n        InputArray K, InputArray D, InputArray R = noArray(), InputArray P  = noArray());\n\n    /** @brief Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty, zero\n    distortion is used; if R or P is empty, identity matrices are used.\n\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$.\n    @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3\n    1-channel or 1x1 3-channel\n    @param P New camera matrix (3x3) or new projection matrix (3x4)\n    @param size Undistorted image size.\n    @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . 
See convertMaps()\n    for details.\n    @param map1 The first output map.\n    @param map2 The second output map.\n     */\n    CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,\n        const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);\n\n    /** @brief Transforms an image to compensate for fisheye lens distortion.\n\n    @param distorted image with fisheye lens distortion.\n    @param undistorted Output image with compensated fisheye lens distortion.\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$.\n    @param Knew Camera matrix of the undistorted image. By default, it is the identity matrix but you\n    may additionally scale and shift the result by using a different matrix.\n    @param new_size\n\n    The function transforms an image to compensate for lens distortion.\n\n    The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap\n    (with bilinear interpolation). See the former function for details of the transformation being\n    performed.\n\n    See below the results of undistortImage.\n    -   a\\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,\n        k_4, k_5, k_6) of distortion were optimized under calibration)\n    -   b\\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,\n        k_3, k_4) of fisheye distortion were optimized under calibration)\n    -   c\\) original image was captured with fisheye lens\n\n    Pictures a) and b) are almost the same. But if we consider points of the image located far from the center\n    of the image, we can notice that on image a) these points are distorted.\n\n    ![image](pics/fisheye_undistorted.jpg)\n     */\n    CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted,\n        InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());\n\n    /** @brief Estimates new camera matrix for undistortion or rectification.\n\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param image_size\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$.\n    @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3\n    1-channel or 1x1 3-channel\n    @param P New camera matrix (3x3) or new projection matrix (3x4)\n    @param balance Sets the new focal length in range between the min focal length and the max focal\n    length. Balance is in range of [0, 1].\n    @param new_size\n    @param fov_scale Divisor for new focal length.\n
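\n    A typical combination with fisheye::initUndistortRectifyMap and remap, sketched for illustration (K, D, imgSize, and the distorted image are assumed to be given):\n    @code\n        Mat newK, map1, map2, undistorted;\n        fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, imgSize, Matx33d::eye(), newK, 0.5);\n        fisheye::initUndistortRectifyMap(K, D, Matx33d::eye(), newK, imgSize, CV_16SC2, map1, map2);\n        remap(distorted, undistorted, map1, map2, INTER_LINEAR);\n    @endcode\n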
     */\n    CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,\n        OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);\n\n    /** @brief Performs camera calibration\n\n    @param objectPoints vector of vectors of calibration pattern points in the calibration pattern\n    coordinate space.\n    @param imagePoints vector of vectors of the projections of calibration pattern points.\n    imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal to\n    objectPoints[i].size() for each i.\n    @param image_size Size of the image used only to initialize the intrinsic camera matrix.\n    @param K Output 3x3 floating-point camera matrix\n    \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ . If\n    fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be\n    initialized before calling the function.\n    @param D Output vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$.\n    @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.\n    That is, each k-th rotation vector together with the corresponding k-th translation vector (see\n    the next output parameter description) brings the calibration pattern from the model coordinate\n    space (in which object points are specified) to the world coordinate space, that is, a real\n    position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).\n    @param tvecs Output vector of translation vectors estimated for each pattern view.\n    @param flags Different flags that may be zero or a combination of the following values:\n    -   **fisheye::CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of\n    fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image\n    center ( imageSize is used), and focal distances are computed in a least-squares fashion.\n    -   **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsics will be recomputed after each iteration\n    of intrinsic optimization.\n    -   **fisheye::CALIB_CHECK_COND** The functions will check the validity of the condition number.\n    -   **fisheye::CALIB_FIX_SKEW** The skew coefficient (alpha) is set to zero and stays zero.\n    -   **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zero and stay\n    zero.\n    @param criteria Termination criteria for the iterative optimization algorithm.\n
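\n    A hedged usage sketch (illustrative; objectPoints and imagePoints are assumed to be collected from several views of the calibration pattern):\n    @code\n        Mat K, D;\n        std::vector<Mat> rvecs, tvecs;\n        int flags = fisheye::CALIB_RECOMPUTE_EXTRINSIC | fisheye::CALIB_FIX_SKEW;\n        double rms = fisheye::calibrate(objectPoints, imagePoints, imgSize, K, D, rvecs, tvecs, flags);\n    @endcode\n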
     */\n    CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,\n        InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,\n            TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));\n\n    /** @brief Stereo rectification for fisheye camera model\n\n    @param K1 First camera matrix.\n    @param D1 First camera distortion parameters.\n    @param K2 Second camera matrix.\n    @param D2 Second camera distortion parameters.\n    @param imageSize Size of the image used for stereo calibration.\n    @param R Rotation matrix between the coordinate systems of the first and the second\n    cameras.\n    @param tvec Translation vector between coordinate systems of the cameras.\n    @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.\n    @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.\n    @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first\n    camera.\n    @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second\n    camera.\n    @param Q Output \\f$4 \\times 4\\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).\n    @param flags Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,\n    the function makes the principal points of each camera have the same pixel coordinates in the\n    rectified views. If the flag is not set, the function may still shift the images in the\n    horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the\n    useful image area.\n    @param newImageSize New image resolution after rectification. The same size should be passed to\n    initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)\n    is passed (default), it is set to the original imageSize . Setting it to a larger value can help you\n    preserve details in the original image, especially when there is a big radial distortion.\n    @param balance Sets the new focal length in range between the min focal length and the max focal\n    length. 
Balance is in range of [0, 1].\n    @param fov_scale Divisor for new focal length.\n     */\n    CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,\n        OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),\n        double balance = 0.0, double fov_scale = 1.0);\n\n    /** @brief Performs stereo calibration\n\n    @param objectPoints Vector of vectors of the calibration pattern points.\n    @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,\n    observed by the first camera.\n    @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,\n    observed by the second camera.\n    @param K1 Input/output first camera matrix:\n    \\f$\\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\\f$ , \\f$j = 0,\\, 1\\f$ . If\n    any of fisheye::CALIB_USE_INTRINSIC_GUESS , fisheye::CALIB_FIX_INTRINSIC are specified,\n    some or all of the matrix components must be initialized.\n    @param D1 Input/output vector of distortion coefficients \\f$(k_1, k_2, k_3, k_4)\\f$ of 4 elements.\n    @param K2 Input/output second camera matrix. The parameter is similar to K1 .\n    @param D2 Input/output lens distortion coefficients for the second camera. The parameter is\n    similar to D1 .\n    @param imageSize Size of the image used only to initialize the intrinsic camera matrix.\n    @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.\n    @param T Output translation vector between the coordinate systems of the cameras.\n    @param flags Different flags that may be zero or a combination of the following values:\n    -   **fisheye::CALIB_FIX_INTRINSIC** Fix K1, K2 and D1, D2 so that only R and T\n    are estimated.\n    -   **fisheye::CALIB_USE_INTRINSIC_GUESS** K1, K2 contain valid initial values of\n    fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image\n    center (imageSize is used), and focal distances are computed in a least-squares fashion.\n    -   **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsics will be recomputed after each iteration\n    of intrinsic optimization.\n    -   **fisheye::CALIB_CHECK_COND** The functions will check the validity of the condition number.\n    -   **fisheye::CALIB_FIX_SKEW** The skew coefficient (alpha) is set to zero and stays zero.\n    -   **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zero and stay\n    zero.\n    @param criteria Termination criteria for the iterative optimization algorithm.\n     */\n    CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,\n                                  InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,\n                                  OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC,\n                                  TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));\n\n//! @} calib3d_fisheye\n}\n\n} // cv\n\n#ifndef DISABLE_OPENCV_24_COMPATIBILITY\n#include \"opencv2/calib3d/calib3d_c.h\"\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/ccalib/multicalib.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,\n// all rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_MULTICAMERACALIBRATION_HPP__\n#define __OPENCV_MULTICAMERACALIBRATION_HPP__\n\n#include \"opencv2/ccalib/randpattern.hpp\"\n#include \"opencv2/ccalib/omnidir.hpp\"\n#include <string>\n#include <iostream>\n\nnamespace cv { namespace multicalib {\n\n//! @addtogroup ccalib\n//! @{\n\n#define HEAD -1\n#define INVALID -2\n\n/** @brief Class for multiple camera calibration that supports pinhole camera and omnidirection camera.\nFor omnidirectional camera model, please refer to omnidir.hpp in ccalib module.\nIt first calibrate each camera individually, then a bundle adjustment like optimization is applied to\nrefine extrinsic parameters. So far, it only support \"random\" pattern for calibration,\nsee randomPattern.hpp in ccalib module for details.\nImages that are used should be named by \"cameraIdx-timestamp.*\", several images with the same timestamp\nmeans that they are the same pattern that are photographed. cameraIdx should start from 0.\n\nFor more details, please refer to paper\n    B. Li, L. Heng, K. Kevin  and M. 
Pollefeys, \"A Multiple-Camera System\n    Calibration Toolbox Using A Feature Descriptor-Based Calibration\n    Pattern\", in IROS 2013.\n*/\n\nclass CV_EXPORTS MultiCameraCalibration\n{\npublic:\n    enum {\n        PINHOLE,\n        OMNIDIRECTIONAL\n        //FISHEYE\n    };\n\n    // an edge connects a camera and pattern\n    struct edge\n    {\n        int cameraVertex;   // vertex index for camera in this edge\n        int photoVertex;    // vertex index for pattern in this edge\n        int photoIndex;     // photo index among photos for this camera\n        Mat transform;      // transform from pattern to camera\n\n        edge(int cv, int pv, int pi, Mat trans)\n        {\n            cameraVertex = cv;\n            photoVertex = pv;\n            photoIndex = pi;\n            transform = trans;\n        }\n    };\n\n    struct vertex\n    {\n        Mat pose;   // relative pose to the first camera. For camera vertex, it is the\n                    // transform from the first camera to this camera, for pattern vertex,\n                    // it is the transform from pattern to the first camera\n        int timestamp;  // timestamp of photo, only available for photo vertex\n\n        vertex(Mat po, int ts)\n        {\n            pose = po;\n            timestamp = ts;\n        }\n\n        vertex()\n        {\n            pose = Mat::eye(4, 4, CV_32F);\n            timestamp = -1;\n        }\n    };\n    /* @brief Constructor\n    @param cameraType camera type, PINHOLE or OMNIDIRECTIONAL\n    @param nCameras number of cameras\n    @fileName filename of string list that are used for calibration, the file is generated\n    by imagelist_creator from OpenCv samples. The first one in the list is the pattern filename.\n    @patternWidth the physical width of pattern, in user defined unit.\n    @patternHeight the physical height of pattern, in user defined unit.\n    @showExtration whether show extracted features and feature filtering.\n    @nMiniMatches minimal number of matched features for a frame.\n\t@flags Calibration flags\n    @criteria optimization stopping criteria.\n    @detector feature detector that detect feature points in pattern and images.\n    @descriptor feature descriptor.\n    @matcher feature matcher.\n    */\n    MultiCameraCalibration(int cameraType, int nCameras, const std::string& fileName, float patternWidth,\n        float patternHeight, int verbose = 0, int showExtration = 0, int nMiniMatches = 20, int flags = 0,\n        TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 200, 1e-7),\n        Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.006f),\n        Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.006f),\n        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(\"BruteForce-L1\"));\n\n    /* @brief load images\n    */\n    void loadImages();\n\n    /* @brief initialize multiple camera calibration. 
It calibrates each camera individually.\n    */\n    void initialize();\n\n    /* @brief optimize extrinsic parameters\n    */\n    double optimizeExtrinsics();\n\n    /* @brief run multi-camera calibration; it runs loadImages(), initialize() and optimizeExtrinsics()\n    */\n    double run();\n\n    /* @brief write camera parameters to file.\n    */\n    void writeParameters(const std::string& filename);\n\nprivate:\n    std::vector<std::string> readStringList();\n\n    int getPhotoVertex(int timestamp);\n\n    void graphTraverse(const Mat& G, int begin, std::vector<int>& order, std::vector<int>& pre);\n\n    void findRowNonZero(const Mat& row, Mat& idx);\n\n    void computeJacobianExtrinsic(const Mat& extrinsicParams, Mat& JTJ_inv, Mat& JTE);\n\n    void computePhotoCameraJacobian(const Mat& rvecPhoto, const Mat& tvecPhoto, const Mat& rvecCamera,\n        const Mat& tvecCamera, Mat& rvecTran, Mat& tvecTran, const Mat& objectPoints, const Mat& imagePoints, const Mat& K,\n        const Mat& distort, const Mat& xi, Mat& jacobianPhoto, Mat& jacobianCamera, Mat& E);\n\n    void compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2, Mat& om3, Mat& T3, Mat& dom3dom1,\n        Mat& dom3dT1, Mat& dom3dom2, Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2);\n\n    void JRodriguesMatlab(const Mat& src, Mat& dst);\n    void dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB);\n\n    double computeProjectError(Mat& parameters);\n\n    void vector2parameters(const Mat& parameters, std::vector<Vec3f>& rvecVertex, std::vector<Vec3f>& tvecVertexs);\n    void parameters2vector(const std::vector<Vec3f>& rvecVertex, const std::vector<Vec3f>& tvecVertex, Mat& parameters);\n\n    int _camType; //PINHOLE, FISHEYE or OMNIDIRECTIONAL\n    int _nCamera;\n    int _nMiniMatches;\n    int _flags;\n    int _verbose;\n    double _error;\n    float _patternWidth, _patternHeight;\n    TermCriteria _criteria;\n    std::string _filename;\n    int _showExtraction;\n    Ptr<FeatureDetector> _detector;\n    Ptr<DescriptorExtractor> _descriptor;\n    Ptr<DescriptorMatcher> _matcher;\n\n    std::vector<edge> _edgeList;\n    std::vector<vertex> _vertexList;\n    std::vector<std::vector<cv::Mat> > _objectPointsForEachCamera;\n    std::vector<std::vector<cv::Mat> > _imagePointsForEachCamera;\n    std::vector<cv::Mat> _cameraMatrix;\n    std::vector<cv::Mat> _distortCoeffs;\n    std::vector<cv::Mat> _xi;\n    std::vector<std::vector<Mat> > _omEachCamera, _tEachCamera;\n};\n\n//! @}\n\n}} // namespace multicalib, cv\n#endif"
  },
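  {
    "path": "docs/examples/multicalib_usage_sketch.cpp",
    "content": "// Illustrative usage sketch for cv::multicalib::MultiCameraCalibration (an added\n// example, not part of the vendored OpenCV sources). The image list name, camera\n// count and pattern dimensions are hypothetical placeholders; the list is the one\n// produced by the imagelist_creator sample, with the pattern image as first entry.\n#include \"opencv2/ccalib/multicalib.hpp\"\n#include <iostream>\n\nint main()\n{\n    // Two pinhole cameras; photos must be named \"cameraIdx-timestamp.*\" as the\n    // class documentation requires, with cameraIdx starting from 0.\n    cv::multicalib::MultiCameraCalibration calib(\n        cv::multicalib::MultiCameraCalibration::PINHOLE, // camera model\n        2,                            // number of cameras\n        \"multi_camera_images.xml\",  // image list; first entry is the pattern image\n        800.0f, 600.0f);              // physical pattern width/height, user units\n\n    // run() performs loadImages(), initialize() and optimizeExtrinsics().\n    double rms = calib.run();\n    std::cout << \"reprojection error: \" << rms << std::endl;\n    calib.writeParameters(\"multi_camera_params.xml\");\n    return 0;\n}\n"
  },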
  {
    "path": "src/3rdparty/opencv/include/opencv2/ccalib/omnidir.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,\n// all rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#include <opencv2/core.hpp>\n#include <vector>\n\n#ifndef __OPENCV_OMNIDIR_HPP__\n#define __OPENCV_OMNIDIR_HPP__\n\nnamespace cv\n{\nnamespace omnidir\n{\n    //! @addtogroup ccalib\n    //! @{\n\n    enum {\n        CALIB_USE_GUESS             = 1,\n        CALIB_FIX_SKEW              = 2,\n        CALIB_FIX_K1                = 4,\n        CALIB_FIX_K2                = 8,\n        CALIB_FIX_P1                = 16,\n        CALIB_FIX_P2                = 32,\n        CALIB_FIX_XI                = 64,\n        CALIB_FIX_GAMMA             = 128,\n        CALIB_FIX_CENTER            = 256\n    };\n\n    enum{\n        RECTIFY_PERSPECTIVE         = 1,\n        RECTIFY_CYLINDRICAL         = 2,\n        RECTIFY_LONGLATI            = 3,\n        RECTIFY_STEREOGRAPHIC       = 4\n    };\n\n    enum{\n        XYZRGB  = 1,\n        XYZ     = 2\n    };\n/**\n * This module was accepted as a GSoC 2015 project for OpenCV, authored by\n * Baisheng Lai, mentored by Bo Li.\n */\n\n    /** @brief Projects points for omnidirectional camera using CMei's model\n\n    @param objectPoints Object points in world coordinate, vector of vector of Vec3f or Mat of\n    1xN/Nx1 3-channel of type CV_32F and N is the number of points. 64F is also acceptable.\n    @param imagePoints Output array of image points, vector of vector of Vec2f or\n    1xN/Nx1 2-channel of type CV_32F. 
64F is also acceptable.\n    @param rvec vector of rotation between world coordinate and camera coordinate, i.e., om\n    @param tvec vector of translation between pattern coordinate and camera coordinate\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, p_1, p_2)\\f$.\n    @param xi The parameter xi for CMei's model\n    @param jacobian Optional output 2Nx16 Jacobian matrix of type CV_64F, containing the derivatives of\n    image pixel points w.r.t. parameters including \\f$om, T, f_x, f_y, s, c_x, c_y, xi, k_1, k_2, p_1, p_2\\f$.\n    This matrix will be used in calibration by optimization.\n\n    The function projects 3D object points from world coordinates to image pixels, parameterized by the\n    intrinsic and extrinsic parameters. Also, it optionally computes a by-product: the Jacobian matrix\n    containing the derivatives of image pixel points w.r.t. the intrinsic and extrinsic parameters.\n     */\n    CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,\n                       InputArray K, double xi, InputArray D, OutputArray jacobian = noArray());\n\n    /** @brief Undistort 2D image points for omnidirectional camera using CMei's model\n\n    @param distorted Array of distorted image points, vector of Vec2f\n    or 1xN/Nx1 2-channel Mat of type CV_32F, 64F depth is also acceptable\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Distortion coefficients \\f$(k_1, k_2, p_1, p_2)\\f$.\n    @param xi The parameter xi for CMei's model\n    @param R Rotation transform between the original and object space : 3x3 1-channel, or vector: 3x1/1x3\n    1-channel or 1x1 3-channel\n    @param undistorted array of normalized object points, vector of Vec2f/Vec2d or 1xN/Nx1 2-channel Mat with the same\n    depth of distorted points.\n     */\n    CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray xi, InputArray R);\n\n    /** @brief Computes undistortion and rectification maps for an omnidirectional camera image transformed by a rotation R.\n    It outputs two maps that are used for cv::remap(). If D is empty then zero distortion is used,\n    if R or P is empty then identity matrices are used.\n\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$, with depth CV_32F or CV_64F\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, p_1, p_2)\\f$, with depth CV_32F or CV_64F\n    @param xi The parameter xi for CMei's model\n    @param R Rotation transform between the original and object space : 3x3 1-channel, or vector: 3x1/1x3, with depth CV_32F or CV_64F\n    @param P New camera matrix (3x3) or new projection matrix (3x4)\n    @param size Undistorted image size.\n    @param mltype Type of the first output map that can be CV_32FC1 or CV_16SC2. 
See convertMaps()\n    for details.\n    @param map1 The first output map.\n    @param map2 The second output map.\n    @param flags Flags indicating the rectification type; RECTIFY_PERSPECTIVE, RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and RECTIFY_STEREOGRAPHIC\n    are supported.\n     */\n    CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray xi, InputArray R, InputArray P, const cv::Size& size,\n        int mltype, OutputArray map1, OutputArray map2, int flags);\n\n    /** @brief Undistort omnidirectional images to perspective images\n\n    @param distorted The input omnidirectional image.\n    @param undistorted The output undistorted image.\n    @param K Camera matrix \\f$K = \\vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$.\n    @param D Input vector of distortion coefficients \\f$(k_1, k_2, p_1, p_2)\\f$.\n    @param xi The parameter xi for CMei's model.\n    @param flags Flags indicating the rectification type; RECTIFY_PERSPECTIVE, RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and RECTIFY_STEREOGRAPHIC\n    @param Knew Camera matrix of the undistorted image. If it is not assigned, K is used.\n    @param new_size The new image size. By default, it is the size of distorted.\n    @param R Rotation matrix between the input and output images. By default, it is the identity matrix.\n    */\n    CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray xi, int flags,\n        InputArray Knew = cv::noArray(), const Size& new_size = Size(), InputArray R = Mat::eye(3, 3, CV_64F));\n\n    /** @brief Perform omnidirectional camera calibration; the default depth of outputs is CV_64F.\n\n    @param objectPoints Vector of vector of Vec3f object points in world (pattern) coordinates.\n    It can also be a vector of Mat with size 1xN/Nx1 and type CV_32FC3. Data with depth of 64F is also acceptable.\n    @param imagePoints Vector of vector of Vec2f corresponding image points of objectPoints. It must be the same\n    size and the same type as objectPoints.\n    @param size Image size of calibration images.\n    @param K Output calibrated camera matrix.\n    @param xi Output parameter xi for CMei's model\n    @param D Output distortion parameters \\f$(k_1, k_2, p_1, p_2)\\f$\n    @param rvecs Output rotations for each calibration image\n    @param tvecs Output translations for each calibration image\n    @param flags The flags that control the calibration\n    @param criteria Termination criteria for optimization\n    @param idx Indices of images that pass initialization, which are really used in calibration. So the size of rvecs is the\n    same as idx.total().\n    */\n    CV_EXPORTS_W double calibrate(InputArray objectPoints, InputArray imagePoints, Size size,\n        InputOutputArray K, InputOutputArray xi, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,\n        int flags, TermCriteria criteria, OutputArray idx=noArray());\n\n    /** @brief Stereo calibration for omnidirectional camera model. It computes the intrinsic parameters for two\n    cameras and the extrinsic parameters between the two cameras. The default depth of outputs is CV_64F.\n\n    @param objectPoints Object points in world (pattern) coordinates. Its type is vector<vector<Vec3f> >.\n    It can also be a vector of Mat with size 1xN/Nx1 and type CV_32FC3. 
Data with depth of 64F is also acceptable.\n    @param imagePoints1 The corresponding image points of the first camera, with type vector<vector<Vec2f> >.\n    It must be the same size and the same type as objectPoints.\n    @param imagePoints2 The corresponding image points of the second camera, with type vector<vector<Vec2f> >.\n    It must be the same size and the same type as objectPoints.\n    @param imageSize1 Image size of calibration images of the first camera.\n    @param imageSize2 Image size of calibration images of the second camera.\n    @param K1 Output camera matrix for the first camera.\n    @param xi1 Output parameter xi of CMei's model for the first camera\n    @param D1 Output distortion parameters \\f$(k_1, k_2, p_1, p_2)\\f$ for the first camera\n    @param K2 Output camera matrix for the second camera.\n    @param xi2 Output parameter xi of CMei's model for the second camera\n    @param D2 Output distortion parameters \\f$(k_1, k_2, p_1, p_2)\\f$ for the second camera\n    @param rvec Output rotation between the first and second camera\n    @param tvec Output translation between the first and second camera\n    @param rvecsL Output rotation for each image of the first camera\n    @param tvecsL Output translation for each image of the first camera\n    @param flags The flags that control stereoCalibrate\n    @param criteria Termination criteria for optimization\n    @param idx Indices of image pairs that pass initialization, which are really used in calibration. So the size of rvecs is the\n    same as idx.total().\n    */\n    CV_EXPORTS_W double stereoCalibrate(InputOutputArrayOfArrays objectPoints, InputOutputArrayOfArrays imagePoints1, InputOutputArrayOfArrays imagePoints2,\n        const Size& imageSize1, const Size& imageSize2, InputOutputArray K1, InputOutputArray xi1, InputOutputArray D1, InputOutputArray K2, InputOutputArray xi2,\n        InputOutputArray D2, OutputArray rvec, OutputArray tvec, OutputArrayOfArrays rvecsL, OutputArrayOfArrays tvecsL, int flags, TermCriteria criteria, OutputArray idx=noArray());\n\n    /** @brief Stereo rectification for omnidirectional camera model. 
It computes the rectification rotations for two cameras\n\n    @param R Rotation between the first and second camera\n    @param T Translation between the first and second camera\n    @param R1 Output 3x3 rotation matrix for the first camera\n    @param R2 Output 3x3 rotation matrix for the second camera\n    */\n    CV_EXPORTS_W void stereoRectify(InputArray R, InputArray T, OutputArray R1, OutputArray R2);\n\n    /** @brief Stereo 3D reconstruction from a pair of images\n\n    @param image1 The first input image\n    @param image2 The second input image\n    @param K1 Input camera matrix of the first camera\n    @param D1 Input distortion parameters \\f$(k_1, k_2, p_1, p_2)\\f$ for the first camera\n    @param xi1 Input parameter xi for the first camera for CMei's model\n    @param K2 Input camera matrix of the second camera\n    @param D2 Input distortion parameters \\f$(k_1, k_2, p_1, p_2)\\f$ for the second camera\n    @param xi2 Input parameter xi for the second camera for CMei's model\n    @param R Rotation between the first and second camera\n    @param T Translation between the first and second camera\n    @param flag Flag of rectification type, RECTIFY_PERSPECTIVE or RECTIFY_LONGLATI\n    @param numDisparities The parameter 'numDisparities' in StereoSGBM, see StereoSGBM for details.\n    @param SADWindowSize The parameter 'SADWindowSize' in StereoSGBM, see StereoSGBM for details.\n    @param disparity Disparity map generated by stereo matching\n    @param image1Rec Rectified image of the first image\n    @param image2Rec Rectified image of the second image\n    @param newSize Image size of rectified image, see omnidir::undistortImage\n    @param Knew New camera matrix of rectified image, see omnidir::undistortImage\n    @param pointCloud Point cloud of 3D reconstruction, with type CV_64FC3\n    @param pointType Point cloud type, it can be XYZRGB or XYZ\n    */\n    CV_EXPORTS_W void stereoReconstruct(InputArray image1, InputArray image2, InputArray K1, InputArray D1, InputArray xi1,\n        InputArray K2, InputArray D2, InputArray xi2, InputArray R, InputArray T, int flag, int numDisparities, int SADWindowSize,\n        OutputArray disparity, OutputArray image1Rec, OutputArray image2Rec, const Size& newSize = Size(), InputArray Knew = cv::noArray(),\n        OutputArray pointCloud = cv::noArray(), int pointType = XYZRGB);\n\nnamespace internal\n{\n    void initializeCalibration(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size size, OutputArrayOfArrays omAll,\n        OutputArrayOfArrays tAll, OutputArray K, double& xi, OutputArray idx = noArray());\n\n    void initializeStereoCalibration(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,\n        const Size& size1, const Size& size2, OutputArray om, OutputArray T, OutputArrayOfArrays omL, OutputArrayOfArrays tL, OutputArray K1, OutputArray D1, OutputArray K2, OutputArray D2,\n        double &xi1, double &xi2, int flags, OutputArray idx);\n\n    void computeJacobian(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags,\n        double epsilon);\n\n    void computeJacobianStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,\n        InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags, double epsilon);\n\n    void encodeParameters(InputArray K, InputArrayOfArrays omAll, InputArrayOfArrays tAll, InputArray distortion, double xi, 
OutputArray parameters);\n\n    void encodeParametersStereo(InputArray K1, InputArray K2, InputArray om, InputArray T, InputArrayOfArrays omL, InputArrayOfArrays tL,\n        InputArray D1, InputArray D2, double xi1, double xi2, OutputArray parameters);\n\n    void decodeParameters(InputArray parameters, OutputArray K, OutputArrayOfArrays omAll, OutputArrayOfArrays tAll, OutputArray distortion, double& xi);\n\n    void decodeParametersStereo(InputArray parameters, OutputArray K1, OutputArray K2, OutputArray om, OutputArray T, OutputArrayOfArrays omL,\n        OutputArrayOfArrays tL, OutputArray D1, OutputArray D2, double& xi1, double& xi2);\n\n    void estimateUncertainties(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& errors, Vec2d& std_error, double& rms, int flags);\n\n    void estimateUncertaintiesStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputArray parameters, Mat& errors,\n        Vec2d& std_error, double& rms, int flags);\n\n    double computeMeanReproErr(InputArrayOfArrays imagePoints, InputArrayOfArrays proImagePoints);\n\n    double computeMeanReproErr(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray K, InputArray D, double xi, InputArrayOfArrays omAll,\n        InputArrayOfArrays tAll);\n\n    double computeMeanReproErrStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputArray K1, InputArray K2,\n        InputArray D1, InputArray D2, double xi1, double xi2, InputArray om, InputArray T, InputArrayOfArrays omL, InputArrayOfArrays TL);\n\n    void checkFixed(Mat &G, int flags, int n);\n\n    void subMatrix(const Mat& src, Mat& dst, const std::vector<int>& cols, const std::vector<int>& rows);\n\n    void flags2idx(int flags, std::vector<int>& idx, int n);\n\n    void flags2idxStereo(int flags, std::vector<int>& idx, int n);\n\n    void fillFixed(Mat& G, int flags, int n);\n\n    void fillFixedStereo(Mat& G, int flags, int n);\n\n    double findMedian(const Mat& row);\n\n    Vec3d findMedian3(InputArray mat);\n\n    void getInterset(InputArray idx1, InputArray idx2, OutputArray inter1, OutputArray inter2, OutputArray inter_ori);\n\n    void compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2, Mat& om3, Mat& T3, Mat& dom3dom1,\n        Mat& dom3dT1, Mat& dom3dom2, Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2);\n\n    //void JRodriguesMatlab(const Mat& src, Mat& dst);\n\n    //void dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB);\n} // internal\n\n//! @}\n\n} // omnidir\n\n} //cv\n#endif"
  },
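  {
    "path": "docs/examples/omnidir_usage_sketch.cpp",
    "content": "// Illustrative usage sketch for cv::omnidir::undistortImage (an added example, not\n// part of the vendored OpenCV sources). The camera matrix K, distortion D and xi\n// below are made-up placeholders; in practice they come from cv::omnidir::calibrate().\n// File names are hypothetical.\n#include \"opencv2/ccalib/omnidir.hpp\"\n#include <opencv2/imgcodecs.hpp>\n\nint main()\n{\n    cv::Mat fisheye = cv::imread(\"omni_input.jpg\");\n    if (fisheye.empty()) return 1;\n\n    cv::Mat K = (cv::Mat_<double>(3, 3) << 400, 0, 320,\n                                             0, 400, 240,\n                                             0,   0,   1);\n    cv::Mat D = (cv::Mat_<double>(1, 4) << -0.1, 0.01, 0.0, 0.0); // k1, k2, p1, p2\n    cv::Mat xi = (cv::Mat_<double>(1, 1) << 1.0);                 // CMei's model xi\n\n    // Rectify to a perspective view; RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and\n    // RECTIFY_STEREOGRAPHIC are the other supported projections.\n    cv::Mat undistorted;\n    cv::omnidir::undistortImage(fisheye, undistorted, K, D, xi,\n                                cv::omnidir::RECTIFY_PERSPECTIVE);\n    cv::imwrite(\"omni_perspective.jpg\", undistorted);\n    return 0;\n}\n"
  },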
  {
    "path": "src/3rdparty/opencv/include/opencv2/ccalib/randpattern.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Baisheng Lai (laibaisheng@gmail.com), Zhejiang University,\n// all rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_RANDOMPATTERN_HPP__\n#define __OPENCV_RANDOMPATTERN_HPP__\n\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/highgui.hpp\"\n\nnamespace cv { namespace randpattern {\n\n\n//! @addtogroup ccalib\n//! @{\n\n/** @brief Class for finding features points and corresponding 3D in world coordinate of\na \"random\" pattern, which can be to be used in calibration. It is useful when pattern is\npartly occluded or only a part of pattern can be observed in multiple cameras calibration.\nThe pattern can be generated by RandomPatternGenerator class described in this file.\n\nPlease refer to paper\n    B. Li, L. Heng, K. Kevin  and M. 
Pollefeys, \"A Multiple-Camera System\n    Calibration Toolbox Using A Feature Descriptor-Based Calibration\n    Pattern\", in IROS 2013.\n*/\n\nclass CV_EXPORTS RandomPatternCornerFinder\n{\npublic:\n\n    /* @brief Construct RandomPatternCornerFinder object\n\n    @param patternWidth the real width of the \"random\" pattern in a user-defined unit.\n    @param patternHeight the real height of the \"random\" pattern in a user-defined unit.\n    @param nminiMatch minimal number of matches; an image with fewer matches is abandoned\n    @param depth depth of output objectPoints and imagePoints, set it to CV_32F or CV_64F.\n    @param verbose verbosity level.\n    @param showExtraction whether to show feature extraction, 0 for no and 1 for yes.\n    @param detector feature detector to detect feature points in the pattern and images.\n    @param descriptor feature descriptor.\n    @param matcher feature matcher.\n    */\n    RandomPatternCornerFinder(float patternWidth, float patternHeight,\n        int nminiMatch = 20, int depth = CV_32F, int verbose = 0, int showExtraction = 0,\n        Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.005f),\n        Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.005f),\n        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(\"BruteForce-L1\"));\n\n    /* @brief Load pattern image and compute features for the pattern\n    @param patternImage image of the \"random\" pattern generated by RandomPatternGenerator; run it first.\n    */\n    void loadPattern(cv::Mat patternImage);\n\n    /* @brief Compute matched object points and image points which are used for calibration.\n    The objectPoints (3D) and imagePoints (2D) are stored inside the class. Run getObjectPoints()\n    and getImagePoints() to get them.\n\n    @param inputImages vector of 8-bit grayscale images containing the \"random\" pattern\n    that are used for calibration.\n    */\n    void computeObjectImagePoints(std::vector<cv::Mat> inputImages);\n\n    //void computeObjectImagePoints2(std::vector<cv::Mat> inputImages);\n\n    /* @brief Compute object and image points for a single image. 
It returns a vector<Mat> in which\n    the first element stores the imagePoints and the second one stores the objectPoints.\n\n    @param inputImage single input image for calibration\n    */\n    std::vector<cv::Mat> computeObjectImagePointsForSingle(cv::Mat inputImage);\n\n    /* @brief Get object (3D) points\n    */\n    std::vector<cv::Mat> getObjectPoints();\n\n    /* @brief Get image (2D) points\n    */\n    std::vector<cv::Mat> getImagePoints();\n\nprivate:\n\n    std::vector<cv::Mat> _objectPonits, _imagePoints;\n    float _patternWidth, _patternHeight;\n    cv::Size _patternImageSize;\n    int _nminiMatch;\n    int _depth;\n    int _verbose;\n\n    Ptr<FeatureDetector> _detector;\n    Ptr<DescriptorExtractor> _descriptor;\n    Ptr<DescriptorMatcher> _matcher;\n    Mat _descriptorPattern;\n    std::vector<cv::KeyPoint> _keypointsPattern;\n    Mat _patternImage;\n    int _showExtraction;\n\n    void keyPoints2MatchedLocation(const std::vector<cv::KeyPoint>& imageKeypoints,\n        const std::vector<cv::KeyPoint>& patternKeypoints, const std::vector<cv::DMatch> matches,\n        cv::Mat& matchedImagelocation, cv::Mat& matchedPatternLocation);\n    void getFilteredLocation(cv::Mat& imageKeypoints, cv::Mat& patternKeypoints, const cv::Mat mask);\n    void getObjectImagePoints(const cv::Mat& imageKeypoints, const cv::Mat& patternKeypoints);\n    void crossCheckMatching( cv::Ptr<DescriptorMatcher>& descriptorMatcher,\n        const Mat& descriptors1, const Mat& descriptors2,\n        std::vector<DMatch>& filteredMatches12, int knn=1 );\n    void drawCorrespondence(const Mat& image1, const std::vector<cv::KeyPoint> keypoint1,\n        const Mat& image2, const std::vector<cv::KeyPoint> keypoint2, const std::vector<cv::DMatch> matches,\n        const Mat& mask1, const Mat& mask2, const int step);\n};\n\n/* @brief Class to generate a \"random\" pattern image that is used by RandomPatternCornerFinder.\nPlease refer to paper\nB. Li, L. Heng, K. Kevin  and M. Pollefeys, \"A Multiple-Camera System\nCalibration Toolbox Using A Feature Descriptor-Based Calibration\nPattern\", in IROS 2013.\n*/\nclass CV_EXPORTS RandomPatternGenerator\n{\npublic:\n    /* @brief Construct RandomPatternGenerator\n\n    @param imageWidth image width of the generated pattern image\n    @param imageHeight image height of the generated pattern image\n    */\n    RandomPatternGenerator(int imageWidth, int imageHeight);\n\n    /* @brief Generate pattern\n    */\n    void generatePattern();\n    /* @brief Get pattern\n    */\n    cv::Mat getPattern();\nprivate:\n    cv::Mat _pattern;\n    int _imageWidth, _imageHeight;\n};\n\n//! @}\n\n}} //namespace randpattern, cv\n#endif"
  },
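  {
    "path": "docs/examples/randpattern_usage_sketch.cpp",
    "content": "// Illustrative usage sketch for the randpattern classes (an added example, not part\n// of the vendored OpenCV sources). File names, the 600x850 pixel pattern and the\n// 60x85 physical size are hypothetical placeholders.\n#include \"opencv2/ccalib/randpattern.hpp\"\n#include <opencv2/calib3d.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <vector>\n\nint main()\n{\n    // 1. Generate a \"random\" pattern image; print it and photograph it.\n    cv::randpattern::RandomPatternGenerator generator(600, 850); // width x height in pixels\n    generator.generatePattern();\n    cv::Mat pattern = generator.getPattern();\n    cv::imwrite(\"random_pattern.png\", pattern);\n\n    // 2. Match features between the pattern and the calibration photos.\n    cv::randpattern::RandomPatternCornerFinder finder(60.0f, 85.0f); // physical size, user units\n    finder.loadPattern(pattern);\n    std::vector<cv::Mat> images;\n    for (int i = 0; i < 3; ++i) // hypothetical photo names view0.jpg .. view2.jpg\n        images.push_back(cv::imread(cv::format(\"view%d.jpg\", i), cv::IMREAD_GRAYSCALE));\n    finder.computeObjectImagePoints(images);\n\n    // 3. Feed the matched 3D/2D correspondences to the standard calibration routine.\n    cv::Mat K, D;\n    std::vector<cv::Mat> rvecs, tvecs;\n    cv::calibrateCamera(finder.getObjectPoints(), finder.getImagePoints(),\n                        images[0].size(), K, D, rvecs, tvecs);\n    return 0;\n}\n"
  },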
  {
    "path": "src/3rdparty/opencv/include/opencv2/ccalib.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_CCALIB_HPP__\n#define __OPENCV_CCALIB_HPP__\n\n#include <opencv2/core.hpp>\n#include <opencv2/features2d.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/calib3d.hpp>\n\n#include <vector>\n\n/** @defgroup ccalib Custom Calibration Pattern for 3D reconstruction\n*/\n\nnamespace cv{ namespace ccalib{\n\n//! @addtogroup ccalib\n//! 
@{\n\nclass CV_EXPORTS CustomPattern : public Algorithm\n{\npublic:\n\tCustomPattern();\n\tvirtual ~CustomPattern();\n\n\tbool create(InputArray pattern, const Size2f boardSize, OutputArray output = noArray());\n\n\tbool findPattern(InputArray image, OutputArray matched_features, OutputArray pattern_points, const double ratio = 0.7,\n\t\t\t\t\t const double proj_error = 8.0, const bool refine_position = false, OutputArray out = noArray(),\n\t\t\t\t\t OutputArray H = noArray(), OutputArray pattern_corners = noArray());\n\n\tbool isInitialized();\n\n\tvoid getPatternPoints(OutputArray original_points);\n    /**<\n\t\tReturns a vector<Point> of the original points.\n\t*/\n\tdouble getPixelSize();\n    /**<\n\t\tGet the pixel size of the pattern\n\t*/\n\n\tbool setFeatureDetector(Ptr<FeatureDetector> featureDetector);\n\tbool setDescriptorExtractor(Ptr<DescriptorExtractor> extractor);\n\tbool setDescriptorMatcher(Ptr<DescriptorMatcher> matcher);\n\n\tPtr<FeatureDetector> getFeatureDetector();\n\tPtr<DescriptorExtractor> getDescriptorExtractor();\n\tPtr<DescriptorMatcher> getDescriptorMatcher();\n\n\tdouble calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,\n\t\t\t\tSize imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,\n\t\t\t\tOutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,\n\t\t\t\tTermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));\n    /**<\n\t\tCalls the calibrateCamera function with the same inputs.\n\t*/\n\n\tbool findRt(InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs,\n                OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);\n\tbool findRt(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,\n                OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);\n    /**<\n\t\tUses solvePnP to find the rotation and translation of the pattern\n\t\twith respect to the camera frame.\n\t*/\n\n\tbool findRtRANSAC(InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs,\n\t\t\t\tOutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int iterationsCount = 100,\n\t\t\t\tfloat reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE);\n\tbool findRtRANSAC(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,\n\t\t\t\tOutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int iterationsCount = 100,\n\t\t\t\tfloat reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE);\n        /**<\n\t\tUses solvePnPRansac()\n\t*/\n\n\tvoid drawOrientation(InputOutputArray image, InputArray tvec, InputArray rvec, InputArray cameraMatrix,\n\t\t\t\t\t\t InputArray distCoeffs, double axis_length = 3, int axis_width = 2);\n    /**<\n\t\tDraws the orientation of the found pattern over the image;\n\t\tpattern_corners are the positions of the pattern's edges projected onto the image.\n\t*/\n\nprivate:\n\n\tMat img_roi;\n\tstd::vector<Point2f> obj_corners;\n\tdouble pxSize;\n\n\tbool initialized;\n\n\tPtr<FeatureDetector> detector;\n\tPtr<DescriptorExtractor> descriptorExtractor;\n\tPtr<DescriptorMatcher> descriptorMatcher;\n\n\tstd::vector<KeyPoint> keypoints;\n\tstd::vector<Point3f> points3d;\n\tMat descriptor;\n\n\tbool init(Mat& image, const float pixel_size, OutputArray output = noArray());\n\tbool 
findPatternPass(const Mat& image, std::vector<Point2f>& matched_features, std::vector<Point3f>& pattern_points,\n\t\t\t\t\t\t Mat& H, std::vector<Point2f>& scene_corners, const double pratio, const double proj_error,\n\t\t\t\t\t\t const bool refine_position = false, const Mat& mask = Mat(), OutputArray output = noArray());\n\tvoid scaleFoundPoints(const double squareSize, const std::vector<KeyPoint>& corners, std::vector<Point3f>& pts3d);\n\tvoid check_matches(std::vector<Point2f>& matched, const std::vector<Point2f>& pattern, std::vector<DMatch>& good, std::vector<Point3f>& pattern_3d, const Mat& H);\n\n\tvoid keypoints2points(const std::vector<KeyPoint>& in, std::vector<Point2f>& out);\n\tvoid updateKeypointsPos(std::vector<KeyPoint>& in, const std::vector<Point2f>& new_pos);\n\tvoid refinePointsPos(const Mat& img, std::vector<Point2f>& p);\n\tvoid refineKeypointsPos(const Mat& img, std::vector<KeyPoint>& kp);\n};\n\n//! @}\n\n}} // namespace ccalib, cv\n\n#endif\n"
  },
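  {
    "path": "docs/examples/custompattern_usage_sketch.cpp",
    "content": "// Illustrative usage sketch for cv::ccalib::CustomPattern (an added example, not\n// part of the vendored OpenCV sources). Image names, the 30x20 board size and the\n// camera intrinsics are hypothetical placeholders.\n#include \"opencv2/ccalib.hpp\"\n#include <opencv2/imgcodecs.hpp>\n\nint main()\n{\n    cv::Mat patternImg = cv::imread(\"my_pattern.png\", cv::IMREAD_GRAYSCALE);\n    cv::Mat frame = cv::imread(\"camera_frame.jpg\");\n    if (patternImg.empty() || frame.empty()) return 1;\n\n    cv::ccalib::CustomPattern pattern;\n    pattern.create(patternImg, cv::Size2f(30.0f, 20.0f)); // physical board size, user units\n\n    // Intrinsics from a previous calibration; placeholder values here.\n    cv::Mat K = (cv::Mat_<double>(3, 3) << 500, 0, 320, 0, 500, 240, 0, 0, 1);\n    cv::Mat dist = cv::Mat::zeros(1, 5, CV_64F);\n\n    // Pose of the pattern with respect to the camera (solvePnP under the hood).\n    cv::Mat rvec, tvec;\n    if (pattern.findRt(frame, K, dist, rvec, tvec))\n        pattern.drawOrientation(frame, tvec, rvec, K, dist); // note: tvec precedes rvec\n    cv::imwrite(\"frame_with_orientation.jpg\", frame);\n    return 0;\n}\n"
  },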
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/affine.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_AFFINE3_HPP__\n#define __OPENCV_CORE_AFFINE3_HPP__\n\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\n\n//! @addtogroup core\n//! @{\n\n    /** @brief Affine transform\n      @todo document\n     */\n    template<typename T>\n    class Affine3\n    {\n    public:\n        typedef T float_type;\n        typedef Matx<float_type, 3, 3> Mat3;\n        typedef Matx<float_type, 4, 4> Mat4;\n        typedef Vec<float_type, 3> Vec3;\n\n        Affine3();\n\n        //! Augmented affine matrix\n        Affine3(const Mat4& affine);\n\n        //! Rotation matrix\n        Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));\n\n        //! Rodrigues vector\n        Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));\n\n        //! Combines all contructors above. Supports 4x4, 4x3, 3x3, 1x3, 3x1 sizes of data matrix\n        explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));\n\n        //! From 16th element array\n        explicit Affine3(const float_type* vals);\n\n        //! Create identity transform\n        static Affine3 Identity();\n\n        //! Rotation matrix\n        void rotation(const Mat3& R);\n\n        //! Rodrigues vector\n        void rotation(const Vec3& rvec);\n\n        //! 
Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix;\n        void rotation(const Mat& data);\n\n        void linear(const Mat3& L);\n        void translation(const Vec3& t);\n\n        Mat3 rotation() const;\n        Mat3 linear() const;\n        Vec3 translation() const;\n\n        //! Rodrigues vector\n        Vec3 rvec() const;\n\n        Affine3 inv(int method = cv::DECOMP_SVD) const;\n\n        //! a.rotate(R) is equivalent to Affine(R, 0) * a;\n        Affine3 rotate(const Mat3& R) const;\n\n        //! a.rotate(rvec) is equivalent to Affine(rvec, 0) * a;\n        Affine3 rotate(const Vec3& rvec) const;\n\n        //! a.translate(t) is equivalent to Affine(E, t) * a;\n        Affine3 translate(const Vec3& t) const;\n\n        //! a.concatenate(affine) is equivalent to affine * a;\n        Affine3 concatenate(const Affine3& affine) const;\n\n        template <typename Y> operator Affine3<Y>() const;\n\n        template <typename Y> Affine3<Y> cast() const;\n\n        Mat4 matrix;\n\n#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H\n        Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine);\n        Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine);\n        operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const;\n        operator Eigen::Transform<T, 3, Eigen::Affine>() const;\n#endif\n    };\n\n    template<typename T> static\n    Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);\n\n    template<typename T, typename V> static\n    V operator*(const Affine3<T>& affine, const V& vector);\n\n    typedef Affine3<float> Affine3f;\n    typedef Affine3<double> Affine3d;\n\n    static Vec3f operator*(const Affine3f& affine, const Vec3f& vector);\n    static Vec3d operator*(const Affine3d& affine, const Vec3d& vector);\n\n    template<typename _Tp> class DataType< Affine3<_Tp> >\n    {\n    public:\n        typedef Affine3<_Tp>                               value_type;\n        typedef Affine3<typename DataType<_Tp>::work_type> work_type;\n        typedef _Tp                                        channel_type;\n\n        enum { generic_type = 0,\n               depth        = DataType<channel_type>::depth,\n               channels     = 16,\n               fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n               type         = CV_MAKETYPE(depth, channels)\n             };\n\n        typedef Vec<channel_type, channels> vec_type;\n    };\n\n//! @} core\n\n}\n\n//! 
@cond IGNORED\n\n///////////////////////////////////////////////////////////////////////////////////\n// Implementation\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3()\n    : matrix(Mat4::eye())\n{}\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3(const Mat4& affine)\n    : matrix(affine)\n{}\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3(const Mat3& R, const Vec3& t)\n{\n    rotation(R);\n    translation(t);\n    matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;\n    matrix.val[15] = 1;\n}\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3(const Vec3& _rvec, const Vec3& t)\n{\n    rotation(_rvec);\n    translation(t);\n    matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;\n    matrix.val[15] = 1;\n}\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)\n{\n    CV_Assert(data.type() == cv::DataType<T>::type);\n\n    if (data.cols == 4 && data.rows == 4)\n    {\n        data.copyTo(matrix);\n        return;\n    }\n    else if (data.cols == 4 && data.rows == 3)\n    {\n        rotation(data(Rect(0, 0, 3, 3)));\n        translation(data(Rect(3, 0, 1, 3)));\n        return;\n    }\n\n    rotation(data);\n    translation(t);\n    matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;\n    matrix.val[15] = 1;\n}\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3(const float_type* vals) : matrix(vals)\n{}\n\ntemplate<typename T> inline\ncv::Affine3<T> cv::Affine3<T>::Identity()\n{\n    return Affine3<T>(cv::Affine3<T>::Mat4::eye());\n}\n\ntemplate<typename T> inline\nvoid cv::Affine3<T>::rotation(const Mat3& R)\n{\n    linear(R);\n}\n\ntemplate<typename T> inline\nvoid cv::Affine3<T>::rotation(const Vec3& _rvec)\n{\n    double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2];\n    double theta = std::sqrt(rx*rx + ry*ry + rz*rz);\n\n    if (theta < DBL_EPSILON)\n        rotation(Mat3::eye());\n    else\n    {\n        const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };\n\n        double c = std::cos(theta);\n        double s = std::sin(theta);\n        double c1 = 1. - c;\n        double itheta = (theta != 0) ? 1./theta : 0.;\n\n        rx *= itheta; ry *= itheta; rz *= itheta;\n\n        double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };\n        double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };\n        Mat3 R;\n\n        // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]\n        // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]\n        for(int k = 0; k < 9; ++k)\n            R.val[k] = static_cast<float_type>(c*I[k] + c1*rrt[k] + s*_r_x_[k]);\n\n        rotation(R);\n    }\n}\n\n//Combines rotation methods above. 
Supports 3x3, 1x3, 3x1 sizes of data matrix;\ntemplate<typename T> inline\nvoid cv::Affine3<T>::rotation(const cv::Mat& data)\n{\n    CV_Assert(data.type() == cv::DataType<T>::type);\n\n    if (data.cols == 3 && data.rows == 3)\n    {\n        Mat3 R;\n        data.copyTo(R);\n        rotation(R);\n    }\n    else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3))\n    {\n        Vec3 _rvec;\n        data.reshape(1, 3).copyTo(_rvec);\n        rotation(_rvec);\n    }\n    else\n        CV_Assert(!\"Input matrix can be 3x3, 1x3 or 3x1\");\n}\n\ntemplate<typename T> inline\nvoid cv::Affine3<T>::linear(const Mat3& L)\n{\n    matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1];  matrix.val[ 2] = L.val[2];\n    matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4];  matrix.val[ 6] = L.val[5];\n    matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7];  matrix.val[10] = L.val[8];\n}\n\ntemplate<typename T> inline\nvoid cv::Affine3<T>::translation(const Vec3& t)\n{\n    matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2];\n}\n\ntemplate<typename T> inline\ntypename cv::Affine3<T>::Mat3 cv::Affine3<T>::rotation() const\n{\n    return linear();\n}\n\ntemplate<typename T> inline\ntypename cv::Affine3<T>::Mat3 cv::Affine3<T>::linear() const\n{\n    typename cv::Affine3<T>::Mat3 R;\n    R.val[0] = matrix.val[0];  R.val[1] = matrix.val[1];  R.val[2] = matrix.val[ 2];\n    R.val[3] = matrix.val[4];  R.val[4] = matrix.val[5];  R.val[5] = matrix.val[ 6];\n    R.val[6] = matrix.val[8];  R.val[7] = matrix.val[9];  R.val[8] = matrix.val[10];\n    return R;\n}\n\ntemplate<typename T> inline\ntypename cv::Affine3<T>::Vec3 cv::Affine3<T>::translation() const\n{\n    return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]);\n}\n\ntemplate<typename T> inline\ntypename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const\n{\n    cv::Vec3d w;\n    cv::Matx33d u, vt, R = rotation();\n    cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A);\n    R = u * vt;\n\n    double rx = R.val[7] - R.val[5];\n    double ry = R.val[2] - R.val[6];\n    double rz = R.val[3] - R.val[1];\n\n    double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);\n    double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;\n    c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;\n    double theta = acos(c);\n\n    if( s < 1e-5 )\n    {\n        if( c > 0 )\n            rx = ry = rz = 0;\n        else\n        {\n            double t;\n            t = (R.val[0] + 1) * 0.5;\n            rx = std::sqrt(std::max(t, 0.0));\n            t = (R.val[4] + 1) * 0.5;\n            ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0);\n            t = (R.val[8] + 1) * 0.5;\n            rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? 
-1.0 : 1.0);\n\n            if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) )\n                rz = -rz;\n            theta /= std::sqrt(rx*rx + ry*ry + rz*rz);\n            rx *= theta;\n            ry *= theta;\n            rz *= theta;\n        }\n    }\n    else\n    {\n        double vth = 1/(2*s);\n        vth *= theta;\n        rx *= vth; ry *= vth; rz *= vth;\n    }\n\n    return cv::Vec3d(rx, ry, rz);\n}\n\ntemplate<typename T> inline\ncv::Affine3<T> cv::Affine3<T>::inv(int method) const\n{\n    return matrix.inv(method);\n}\n\ntemplate<typename T> inline\ncv::Affine3<T> cv::Affine3<T>::rotate(const Mat3& R) const\n{\n    Mat3 Lc = linear();\n    Vec3 tc = translation();\n    Mat4 result;\n    result.val[12] = result.val[13] = result.val[14] = 0;\n    result.val[15] = 1;\n\n    for(int j = 0; j < 3; ++j)\n    {\n        for(int i = 0; i < 3; ++i)\n        {\n            float_type value = 0;\n            for(int k = 0; k < 3; ++k)\n                value += R(j, k) * Lc(k, i);\n            result(j, i) = value;\n        }\n\n        result(j, 3) = R.row(j).dot(tc.t());\n    }\n    return result;\n}\n\ntemplate<typename T> inline\ncv::Affine3<T> cv::Affine3<T>::rotate(const Vec3& _rvec) const\n{\n    return rotate(Affine3f(_rvec).rotation());\n}\n\ntemplate<typename T> inline\ncv::Affine3<T> cv::Affine3<T>::translate(const Vec3& t) const\n{\n    Mat4 m = matrix;\n    m.val[ 3] += t[0];\n    m.val[ 7] += t[1];\n    m.val[11] += t[2];\n    return m;\n}\n\ntemplate<typename T> inline\ncv::Affine3<T> cv::Affine3<T>::concatenate(const Affine3<T>& affine) const\n{\n    return (*this).rotate(affine.rotation()).translate(affine.translation());\n}\n\ntemplate<typename T> template <typename Y> inline\ncv::Affine3<T>::operator Affine3<Y>() const\n{\n    return Affine3<Y>(matrix);\n}\n\ntemplate<typename T> template <typename Y> inline\ncv::Affine3<Y> cv::Affine3<T>::cast() const\n{\n    return Affine3<Y>(matrix);\n}\n\ntemplate<typename T> inline\ncv::Affine3<T> cv::operator*(const cv::Affine3<T>& affine1, const cv::Affine3<T>& affine2)\n{\n    return affine2.concatenate(affine1);\n}\n\ntemplate<typename T, typename V> inline\nV cv::operator*(const cv::Affine3<T>& affine, const V& v)\n{\n    const typename Affine3<T>::Mat4& m = affine.matrix;\n\n    V r;\n    r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3];\n    r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7];\n    r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11];\n    return r;\n}\n\nstatic inline\ncv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v)\n{\n    const cv::Matx44f& m = affine.matrix;\n    cv::Vec3f r;\n    r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];\n    r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];\n    r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];\n    return r;\n}\n\nstatic inline\ncv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)\n{\n    const cv::Matx44d& m = affine.matrix;\n    cv::Vec3d r;\n    r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];\n    r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];\n    r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];\n    return r;\n}\n\n\n\n#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3(const Eigen::Transform<T, 
3, Eigen::Affine, (Eigen::RowMajor)>& affine)\n{\n    cv::Mat(4, 4, cv::DataType<T>::type, affine.matrix().data()).copyTo(matrix);\n}\n\ntemplate<typename T> inline\ncv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine)\n{\n    Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine;\n    cv::Mat(4, 4, cv::DataType<T>::type, a.matrix().data()).copyTo(matrix);\n}\n\ntemplate<typename T> inline\ncv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const\n{\n    Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> r;\n    cv::Mat hdr(4, 4, cv::DataType<T>::type, r.matrix().data());\n    cv::Mat(matrix, false).copyTo(hdr);\n    return r;\n}\n\ntemplate<typename T> inline\ncv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine>() const\n{\n    return this->operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>();\n}\n\n#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */\n\n//! @endcond\n\n#endif /* __cplusplus */\n\n#endif /* __OPENCV_CORE_AFFINE3_HPP__ */\n"
  },
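  {
    "path": "docs/examples/affine3_usage_sketch.cpp",
    "content": "// Illustrative usage sketch for cv::Affine3 (an added example, not part of the\n// vendored OpenCV sources). The rotation/translation values are arbitrary\n// placeholders.\n#include <opencv2/core.hpp>\n#include <opencv2/core/affine.hpp>\n#include <iostream>\n\nint main()\n{\n    // Pose from a Rodrigues vector (rotation of pi/2 about Z) and a translation.\n    cv::Affine3d camFromWorld(cv::Vec3d(0, 0, CV_PI / 2), cv::Vec3d(1, 0, 0));\n\n    // Apply the 4x4 augmented matrix to a 3D point.\n    cv::Vec3d q = camFromWorld * cv::Vec3d(1, 0, 0);\n\n    // Composition: operator* applies the right-hand transform first.\n    cv::Affine3d lift = cv::Affine3d::Identity().translate(cv::Vec3d(0, 0, 2));\n    cv::Affine3d combined = lift * camFromWorld;\n\n    // Inverse pose and back-conversion to a Rodrigues vector.\n    cv::Affine3d worldFromCam = camFromWorld.inv();\n    std::cout << q << \" \" << combined.rvec() << \" \"\n              << worldFromCam.translation() << std::endl;\n    return 0;\n}\n"
  },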
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/base.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2014, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_BASE_HPP__\n#define __OPENCV_CORE_BASE_HPP__\n\n#ifndef __cplusplus\n#  error base.hpp header must be compiled as C++\n#endif\n\n#include <climits>\n#include <algorithm>\n\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/cvstd.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup core_utils\n//! @{\n\nnamespace Error {\n//! error codes\nenum Code {\n    StsOk=                       0,  //!< everithing is ok\n    StsBackTrace=               -1,  //!< pseudo error for back trace\n    StsError=                   -2,  //!< unknown /unspecified error\n    StsInternal=                -3,  //!< internal error (bad state)\n    StsNoMem=                   -4,  //!< insufficient memory\n    StsBadArg=                  -5,  //!< function arg/param is bad\n    StsBadFunc=                 -6,  //!< unsupported function\n    StsNoConv=                  -7,  //!< iter. 
didn't converge\n    StsAutoTrace=               -8,  //!< tracing\n    HeaderIsNull=               -9,  //!< image header is NULL\n    BadImageSize=              -10,  //!< image size is invalid\n    BadOffset=                 -11,  //!< offset is invalid\n    BadDataPtr=                -12,  //!<\n    BadStep=                   -13,  //!<\n    BadModelOrChSeq=           -14,  //!<\n    BadNumChannels=            -15,  //!<\n    BadNumChannel1U=           -16,  //!<\n    BadDepth=                  -17,  //!<\n    BadAlphaChannel=           -18,  //!<\n    BadOrder=                  -19,  //!<\n    BadOrigin=                 -20,  //!<\n    BadAlign=                  -21,  //!<\n    BadCallBack=               -22,  //!<\n    BadTileSize=               -23,  //!<\n    BadCOI=                    -24,  //!<\n    BadROISize=                -25,  //!<\n    MaskIsTiled=               -26,  //!<\n    StsNullPtr=                -27,  //!< null pointer\n    StsVecLengthErr=           -28,  //!< incorrect vector length\n    StsFilterStructContentErr= -29,  //!< incorrect filter structure content\n    StsKernelStructContentErr= -30,  //!< incorrect transform kernel content\n    StsFilterOffsetErr=        -31,  //!< incorrect filter offset value\n    StsBadSize=                -201, //!< the input/output structure size is incorrect\n    StsDivByZero=              -202, //!< division by zero\n    StsInplaceNotSupported=    -203, //!< in-place operation is not supported\n    StsObjectNotFound=         -204, //!< request can't be completed\n    StsUnmatchedFormats=       -205, //!< formats of input/output arrays differ\n    StsBadFlag=                -206, //!< flag is wrong or not supported\n    StsBadPoint=               -207, //!< bad CvPoint\n    StsBadMask=                -208, //!< bad format of mask (neither 8uC1 nor 8sC1)\n    StsUnmatchedSizes=         -209, //!< sizes of input/output structures do not match\n    StsUnsupportedFormat=      -210, //!< the data format/type is not supported by the function\n    StsOutOfRange=             -211, //!< some of parameters are out of range\n    StsParseError=             -212, //!< invalid syntax/structure of the parsed file\n    StsNotImplemented=         -213, //!< the requested function/feature is not implemented\n    StsBadMemBlock=            -214, //!< an allocated block has been corrupted\n    StsAssert=                 -215, //!< assertion failed\n    GpuNotSupported=           -216,\n    GpuApiCallError=           -217,\n    OpenGlNotSupported=        -218,\n    OpenGlApiCallError=        -219,\n    OpenCLApiCallError=        -220,\n    OpenCLDoubleNotSupported=  -221,\n    OpenCLInitError=           -222,\n    OpenCLNoAMDBlasFft=        -223\n};\n} //Error\n\n//! @} core_utils\n\n//! @addtogroup core_array\n//! @{\n\n//! matrix decomposition types\nenum DecompTypes {\n    /** Gaussian elimination with the optimal pivot element chosen. 
*/\n    DECOMP_LU       = 0,\n    /** singular value decomposition (SVD) method; the system can be over-defined and/or the matrix\n    src1 can be singular */\n    DECOMP_SVD      = 1,\n    /** eigenvalue decomposition; the matrix src1 must be symmetrical */\n    DECOMP_EIG      = 2,\n    /** Cholesky \\f$LL^T\\f$ factorization; the matrix src1 must be symmetrical and positively\n    defined */\n    DECOMP_CHOLESKY = 3,\n    /** QR factorization; the system can be over-defined and/or the matrix src1 can be singular */\n    DECOMP_QR       = 4,\n    /** while all the previous flags are mutually exclusive, this flag can be used together with\n    any of the previous; it means that the normal equations\n    \\f$\\texttt{src1}^T\\cdot\\texttt{src1}\\cdot\\texttt{dst}=\\texttt{src1}^T\\texttt{src2}\\f$ are\n    solved instead of the original system\n    \\f$\\texttt{src1}\\cdot\\texttt{dst}=\\texttt{src2}\\f$ */\n    DECOMP_NORMAL   = 16\n};\n\n/** norm types\n- For one array:\n\\f[norm =  \\forkthree{\\|\\texttt{src1}\\|_{L_{\\infty}} =  \\max _I | \\texttt{src1} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_INF}\\) }\n{ \\| \\texttt{src1} \\| _{L_1} =  \\sum _I | \\texttt{src1} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_L1}\\) }\n{ \\| \\texttt{src1} \\| _{L_2} =  \\sqrt{\\sum_I \\texttt{src1}(I)^2} }{if  \\(\\texttt{normType} = \\texttt{NORM_L2}\\) }\\f]\n\n- Absolute norm for two arrays\n\\f[norm =  \\forkthree{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_{\\infty}} =  \\max _I | \\texttt{src1} (I) -  \\texttt{src2} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_INF}\\) }\n{ \\| \\texttt{src1} - \\texttt{src2} \\| _{L_1} =  \\sum _I | \\texttt{src1} (I) -  \\texttt{src2} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_L1}\\) }\n{ \\| \\texttt{src1} - \\texttt{src2} \\| _{L_2} =  \\sqrt{\\sum_I (\\texttt{src1}(I) - \\texttt{src2}(I))^2} }{if  \\(\\texttt{normType} = \\texttt{NORM_L2}\\) }\\f]\n\n- Relative norm for two arrays\n\\f[norm =  \\forkthree{\\frac{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_{\\infty}}    }{\\|\\texttt{src2}\\|_{L_{\\infty}} }}{if  \\(\\texttt{normType} = \\texttt{NORM_RELATIVE_INF}\\) }\n{ \\frac{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_1} }{\\|\\texttt{src2}\\|_{L_1}} }{if  \\(\\texttt{normType} = \\texttt{NORM_RELATIVE_L1}\\) }\n{ \\frac{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_2} }{\\|\\texttt{src2}\\|_{L_2}} }{if  \\(\\texttt{normType} = \\texttt{NORM_RELATIVE_L2}\\) }\\f]\n\nAs example for one array consider the function \\f$r(x)= \\begin{pmatrix} x \\\\ 1-x \\end{pmatrix}, x \\in [-1;1]\\f$.\nThe \\f$ L_{1}, L_{2} \\f$ and \\f$ L_{\\infty} \\f$ norm for the sample value \\f$r(-1) = \\begin{pmatrix} -1 \\\\ 2 \\end{pmatrix}\\f$\nis calculated as follows\n\\f{align*}\n    \\| r(-1) \\|_{L_1} &= |-1| + |2| = 3 \\\\\n    \\| r(-1) \\|_{L_2} &= \\sqrt{(-1)^{2} + (2)^{2}} = \\sqrt{5} \\\\\n    \\| r(-1) \\|_{L_\\infty} &= \\max(|-1|,|2|) = 2\n\\f}\nand for \\f$r(0.5) = \\begin{pmatrix} 0.5 \\\\ 0.5 \\end{pmatrix}\\f$ the calculation is\n\\f{align*}\n    \\| r(0.5) \\|_{L_1} &= |0.5| + |0.5| = 1 \\\\\n    \\| r(0.5) \\|_{L_2} &= \\sqrt{(0.5)^{2} + (0.5)^{2}} = \\sqrt{0.5} \\\\\n    \\| r(0.5) \\|_{L_\\infty} &= \\max(|0.5|,|0.5|) = 0.5.\n\\f}\nThe following graphic shows all values for the three norm functions \\f$\\| r(x) \\|_{L_1}, \\| r(x) \\|_{L_2}\\f$ and \\f$\\| r(x) \\|_{L_\\infty}\\f$.\nIt is notable that the \\f$ L_{1} \\f$ norm forms the upper and the \\f$ L_{\\infty} \\f$ norm forms the lower border for the example function \\f$ r(x) \\f$.\n![Graphs for the 
different norm functions from the above example](pics/NormTypes_OneArray_1-2-INF.png)\n */\nenum NormTypes { NORM_INF       = 1,\n                 NORM_L1        = 2,\n                 NORM_L2        = 4,\n                 NORM_L2SQR     = 5,\n                 NORM_HAMMING   = 6,\n                 NORM_HAMMING2  = 7,\n                 NORM_TYPE_MASK = 7,\n                 NORM_RELATIVE  = 8, //!< flag\n                 NORM_MINMAX    = 32 //!< flag\n               };\n\n//! comparison types\nenum CmpTypes { CMP_EQ = 0, //!< src1 is equal to src2.\n                CMP_GT = 1, //!< src1 is greater than src2.\n                CMP_GE = 2, //!< src1 is greater than or equal to src2.\n                CMP_LT = 3, //!< src1 is less than src2.\n                CMP_LE = 4, //!< src1 is less than or equal to src2.\n                CMP_NE = 5  //!< src1 is unequal to src2.\n              };\n\n//! generalized matrix multiplication flags\nenum GemmFlags { GEMM_1_T = 1, //!< transposes src1\n                 GEMM_2_T = 2, //!< transposes src2\n                 GEMM_3_T = 4 //!< transposes src3\n               };\n\nenum DftFlags {\n    /** performs an inverse 1D or 2D transform instead of the default forward\n        transform. */\n    DFT_INVERSE        = 1,\n    /** scales the result: divide it by the number of array elements. Normally, it is\n        combined with DFT_INVERSE. */\n    DFT_SCALE          = 2,\n    /** performs a forward or inverse transform of every individual row of the input\n        matrix; this flag enables you to transform multiple vectors simultaneously and can be used to\n        decrease the overhead (which is sometimes several times larger than the processing itself) to\n        perform 3D and higher-dimensional transformations and so forth.*/\n    DFT_ROWS           = 4,\n    /** performs a forward transformation of 1D or 2D real array; the result,\n        though being a complex array, has complex-conjugate symmetry (*CCS*, see the function\n        description below for details), and such an array can be packed into a real array of the same\n        size as input, which is the fastest option and which is what the function does by default;\n        however, you may wish to get a full complex array (for simpler spectrum analysis, and so on) -\n        pass the flag to enable the function to produce a full-size complex output array. */\n    DFT_COMPLEX_OUTPUT = 16,\n    /** performs an inverse transformation of a 1D or 2D complex array; the\n        result is normally a complex array of the same size, however, if the input array has\n        conjugate-complex symmetry (for example, it is a result of forward transformation with\n        DFT_COMPLEX_OUTPUT flag), the output is a real array; while the function itself does not\n        check whether the input is symmetrical or not, you can pass the flag and then the function\n        will assume the symmetry and produce the real output array (note that when the input is packed\n        into a real array and inverse transformation is executed, the function treats the input as a\n        packed complex-conjugate symmetrical array, and the output will also be a real array). */\n    DFT_REAL_OUTPUT    = 32,\n    /** performs an inverse 1D or 2D transform instead of the default forward transform. */\n    DCT_INVERSE        = DFT_INVERSE,\n    /** performs a forward or inverse transform of every individual row of the input\n        matrix. 
This flag enables you to transform multiple vectors simultaneously and can be used to\n        decrease the overhead (which is sometimes several times larger than the processing itself) to\n        perform 3D and higher-dimensional transforms and so forth.*/\n    DCT_ROWS           = DFT_ROWS\n};\n\n//! Various border types, image boundaries are denoted with `|`\n//! @see borderInterpolate, copyMakeBorder\nenum BorderTypes {\n    BORDER_CONSTANT    = 0, //!< `iiiiii|abcdefgh|iiiiiii`  with some specified `i`\n    BORDER_REPLICATE   = 1, //!< `aaaaaa|abcdefgh|hhhhhhh`\n    BORDER_REFLECT     = 2, //!< `fedcba|abcdefgh|hgfedcb`\n    BORDER_WRAP        = 3, //!< `cdefgh|abcdefgh|abcdefg`\n    BORDER_REFLECT_101 = 4, //!< `gfedcb|abcdefgh|gfedcba`\n    BORDER_TRANSPARENT = 5, //!< `uvwxyz|abcdefgh|ijklmno`\n\n    BORDER_REFLECT101  = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101\n    BORDER_DEFAULT     = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101\n    BORDER_ISOLATED    = 16 //!< do not look outside of ROI\n};\n\n//! @} core_array\n\n//! @addtogroup core_utils\n//! @{\n\n//! @cond IGNORED\n\n//////////////// static assert /////////////////\n#define CVAUX_CONCAT_EXP(a, b) a##b\n#define CVAUX_CONCAT(a, b) CVAUX_CONCAT_EXP(a,b)\n\n#if defined(__clang__)\n#  ifndef __has_extension\n#    define __has_extension __has_feature /* compatibility, for older versions of clang */\n#  endif\n#  if __has_extension(cxx_static_assert)\n#    define CV_StaticAssert(condition, reason)    static_assert((condition), reason \" \" #condition)\n#  elif __has_extension(c_static_assert)\n#    define CV_StaticAssert(condition, reason)    _Static_assert((condition), reason \" \" #condition)\n#  endif\n#elif defined(__GNUC__)\n#  if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L)\n#    define CV_StaticAssert(condition, reason)    static_assert((condition), reason \" \" #condition)\n#  endif\n#elif defined(_MSC_VER)\n#  if _MSC_VER >= 1600 /* MSVC 10 */\n#    define CV_StaticAssert(condition, reason)    static_assert((condition), reason \" \" #condition)\n#  endif\n#endif\n#ifndef CV_StaticAssert\n#  if !defined(__clang__) && defined(__GNUC__) && (__GNUC__*100 + __GNUC_MINOR__ > 302)\n#    define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error(\"CV_StaticAssert: \" reason \" \" #condition))) CV_StaticAssert(); ((condition) ? 
0 : CV_StaticAssert()); })\n#  else\n     template <bool x> struct CV_StaticAssert_failed;\n     template <> struct CV_StaticAssert_failed<true> { enum { val = 1 }; };\n     template<int x> struct CV_StaticAssert_test {};\n#    define CV_StaticAssert(condition, reason)\\\n       typedef cv::CV_StaticAssert_test< sizeof(cv::CV_StaticAssert_failed< static_cast<bool>(condition) >) > CVAUX_CONCAT(CV_StaticAssert_failed_at_, __LINE__)\n#  endif\n#endif\n\n// Suppress warning \"-Wdeprecated-declarations\" / C4996\n#if defined(_MSC_VER)\n    #define CV_DO_PRAGMA(x) __pragma(x)\n#elif defined(__GNUC__)\n    #define CV_DO_PRAGMA(x) _Pragma (#x)\n#else\n    #define CV_DO_PRAGMA(x)\n#endif\n\n#ifdef _MSC_VER\n#define CV_SUPPRESS_DEPRECATED_START \\\n    CV_DO_PRAGMA(warning(push)) \\\n    CV_DO_PRAGMA(warning(disable: 4996))\n#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(warning(pop))\n#elif defined (__clang__) || ((__GNUC__)  && (__GNUC__*100 + __GNUC_MINOR__ > 405))\n#define CV_SUPPRESS_DEPRECATED_START \\\n    CV_DO_PRAGMA(GCC diagnostic push) \\\n    CV_DO_PRAGMA(GCC diagnostic ignored \"-Wdeprecated-declarations\")\n#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(GCC diagnostic pop)\n#else\n#define CV_SUPPRESS_DEPRECATED_START\n#define CV_SUPPRESS_DEPRECATED_END\n#endif\n#define CV_UNUSED(name) (void)name\n//! @endcond\n\n/*! @brief Signals an error and raises the exception.\n\nBy default the function prints information about the error to stderr,\nthen it either stops if setBreakOnError() had been called before or raises the exception.\nIt is possible to alter error processing by using redirectError().\n@param _code - error code (Error::Code)\n@param _err - error description\n@param _func - function name. Available only when the compiler supports getting it\n@param _file - source file name where the error has occurred\n@param _line - line number in the source file where the error has occurred\n@see CV_Error, CV_Error_, CV_ErrorNoReturn, CV_ErrorNoReturn_, CV_Assert, CV_DbgAssert\n */\nCV_EXPORTS void error(int _code, const String& _err, const char* _func, const char* _file, int _line);\n\n#ifdef __GNUC__\n# if defined __clang__ || defined __APPLE__\n#   pragma GCC diagnostic push\n#   pragma GCC diagnostic ignored \"-Winvalid-noreturn\"\n# endif\n#endif\n\n/** same as cv::error, but does not return */\nCV_INLINE CV_NORETURN void errorNoReturn(int _code, const String& _err, const char* _func, const char* _file, int _line)\n{\n    error(_code, _err, _func, _file, _line);\n#ifdef __GNUC__\n# if !defined __clang__ && !defined __APPLE__\n    // this suppresses the warning: \"noreturn\" function does return [enabled by default]\n    __builtin_trap();\n    // or use infinite loop: for (;;) {}\n# endif\n#endif\n}\n#ifdef __GNUC__\n# if defined __clang__ || defined __APPLE__\n#   pragma GCC diagnostic pop\n# endif\n#endif\n\n#if defined __GNUC__\n#define CV_Func __func__\n#elif defined _MSC_VER\n#define CV_Func __FUNCTION__\n#else\n#define CV_Func \"\"\n#endif\n\n/** @brief Call the error handler.\n\nCurrently, the error handler prints the error code and the error message to the standard\nerror stream `stderr`. In the Debug configuration, it then provokes memory access violation, so that\nthe execution stack and all the parameters can be analyzed by the debugger. 
In the Release\nconfiguration, the exception is thrown.\n\n@param code one of Error::Code\n@param msg error message\n*/\n#define CV_Error( code, msg ) cv::error( code, msg, CV_Func, __FILE__, __LINE__ )\n\n/**  @brief Call the error handler.\n\nThis macro can be used to construct an error message on-fly to include some dynamic information,\nfor example:\n@code\n    // note the extra parentheses around the formatted text message\n    CV_Error_( CV_StsOutOfRange,\n    (\"the value at (%d, %d)=%g is out of range\", badPt.x, badPt.y, badValue));\n@endcode\n@param code one of Error::Code\n@param args printf-like formatted error message in parentheses\n*/\n#define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ )\n\n/** @brief Checks a condition at runtime and throws exception if it fails\n\nThe macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros\nraise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release\nconfigurations while CV_DbgAssert is only retained in the Debug configuration.\n*/\n#define CV_Assert( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )\n\n/** same as CV_Error(code,msg), but does not return */\n#define CV_ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ )\n\n/** same as CV_Error_(code,args), but does not return */\n#define CV_ErrorNoReturn_( code, args ) cv::errorNoReturn( code, cv::format args, CV_Func, __FILE__, __LINE__ )\n\n/** replaced with CV_Assert(expr) in Debug configuration */\n#ifdef _DEBUG\n#  define CV_DbgAssert(expr) CV_Assert(expr)\n#else\n#  define CV_DbgAssert(expr)\n#endif\n\n/*\n * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor\n * bit count of A exclusive XOR'ed with B\n */\nstruct CV_EXPORTS Hamming\n{\n    enum { normType = NORM_HAMMING };\n    typedef unsigned char ValueType;\n    typedef int ResultType;\n\n    /** this will count the bits in a ^ b\n     */\n    ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const;\n};\n\ntypedef Hamming HammingLUT;\n\n/////////////////////////////////// inline norms ////////////////////////////////////\n\ntemplate<typename _Tp> inline _Tp cv_abs(_Tp x) { return std::abs(x); }\ninline int cv_abs(uchar x) { return x; }\ninline int cv_abs(schar x) { return std::abs(x); }\ninline int cv_abs(ushort x) { return x; }\ninline int cv_abs(short x) { return std::abs(x); }\n\ntemplate<typename _Tp, typename _AccTp> static inline\n_AccTp normL2Sqr(const _Tp* a, int n)\n{\n    _AccTp s = 0;\n    int i=0;\n#if CV_ENABLE_UNROLLED\n    for( ; i <= n - 4; i += 4 )\n    {\n        _AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3];\n        s += v0*v0 + v1*v1 + v2*v2 + v3*v3;\n    }\n#endif\n    for( ; i < n; i++ )\n    {\n        _AccTp v = a[i];\n        s += v*v;\n    }\n    return s;\n}\n\ntemplate<typename _Tp, typename _AccTp> static inline\n_AccTp normL1(const _Tp* a, int n)\n{\n    _AccTp s = 0;\n    int i = 0;\n#if CV_ENABLE_UNROLLED\n    for(; i <= n - 4; i += 4 )\n    {\n        s += (_AccTp)cv_abs(a[i]) + (_AccTp)cv_abs(a[i+1]) +\n            (_AccTp)cv_abs(a[i+2]) + (_AccTp)cv_abs(a[i+3]);\n    }\n#endif\n    for( ; i < n; i++ )\n        s += cv_abs(a[i]);\n    return s;\n}\n\ntemplate<typename _Tp, typename _AccTp> static inline\n_AccTp normInf(const _Tp* a, int n)\n{\n    _AccTp s = 0;\n    for( int i = 0; i < n; i++ 
)\n        s = std::max(s, (_AccTp)cv_abs(a[i]));\n    return s;\n}\n\ntemplate<typename _Tp, typename _AccTp> static inline\n_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n)\n{\n    _AccTp s = 0;\n    int i= 0;\n#if CV_ENABLE_UNROLLED\n    for(; i <= n - 4; i += 4 )\n    {\n        _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);\n        s += v0*v0 + v1*v1 + v2*v2 + v3*v3;\n    }\n#endif\n    for( ; i < n; i++ )\n    {\n        _AccTp v = _AccTp(a[i] - b[i]);\n        s += v*v;\n    }\n    return s;\n}\n\nstatic inline float normL2Sqr(const float* a, const float* b, int n)\n{\n    float s = 0.f;\n    for( int i = 0; i < n; i++ )\n    {\n        float v = a[i] - b[i];\n        s += v*v;\n    }\n    return s;\n}\n\ntemplate<typename _Tp, typename _AccTp> static inline\n_AccTp normL1(const _Tp* a, const _Tp* b, int n)\n{\n    _AccTp s = 0;\n    int i= 0;\n#if CV_ENABLE_UNROLLED\n    for(; i <= n - 4; i += 4 )\n    {\n        _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);\n        s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3);\n    }\n#endif\n    for( ; i < n; i++ )\n    {\n        _AccTp v = _AccTp(a[i] - b[i]);\n        s += std::abs(v);\n    }\n    return s;\n}\n\ninline float normL1(const float* a, const float* b, int n)\n{\n    float s = 0.f;\n    for( int i = 0; i < n; i++ )\n    {\n        s += std::abs(a[i] - b[i]);\n    }\n    return s;\n}\n\ninline int normL1(const uchar* a, const uchar* b, int n)\n{\n    int s = 0;\n    for( int i = 0; i < n; i++ )\n    {\n        s += std::abs(a[i] - b[i]);\n    }\n    return s;\n}\n\ntemplate<typename _Tp, typename _AccTp> static inline\n_AccTp normInf(const _Tp* a, const _Tp* b, int n)\n{\n    _AccTp s = 0;\n    for( int i = 0; i < n; i++ )\n    {\n        _AccTp v0 = a[i] - b[i];\n        s = std::max(s, std::abs(v0));\n    }\n    return s;\n}\n\n/** @brief Computes the cube root of an argument.\n\n The function cubeRoot computes \\f$\\sqrt[3]{\\texttt{val}}\\f$. Negative arguments are handled correctly.\n NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for\n single-precision data.\n @param val A function argument.\n */\nCV_EXPORTS_W float cubeRoot(float val);\n\n/** @brief Calculates the angle of a 2D vector in degrees.\n\n The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured\n in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees.\n @param x x-coordinate of the vector.\n @param y y-coordinate of the vector.\n */\nCV_EXPORTS_W float fastAtan2(float y, float x);\n\n/** proxy for hal::LU */\nCV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);\n/** proxy for hal::LU */\nCV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);\n/** proxy for hal::Cholesky */\nCV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);\n/** proxy for hal::Cholesky */\nCV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);\n\n////////////////// forward declarations for important OpenCV types //////////////////\n\n//! 
@cond IGNORED\n\ntemplate<typename _Tp, int cn> class Vec;\ntemplate<typename _Tp, int m, int n> class Matx;\n\ntemplate<typename _Tp> class Complex;\ntemplate<typename _Tp> class Point_;\ntemplate<typename _Tp> class Point3_;\ntemplate<typename _Tp> class Size_;\ntemplate<typename _Tp> class Rect_;\ntemplate<typename _Tp> class Scalar_;\n\nclass CV_EXPORTS RotatedRect;\nclass CV_EXPORTS Range;\nclass CV_EXPORTS TermCriteria;\nclass CV_EXPORTS KeyPoint;\nclass CV_EXPORTS DMatch;\nclass CV_EXPORTS RNG;\n\nclass CV_EXPORTS Mat;\nclass CV_EXPORTS MatExpr;\n\nclass CV_EXPORTS UMat;\n\nclass CV_EXPORTS SparseMat;\ntypedef Mat MatND;\n\ntemplate<typename _Tp> class Mat_;\ntemplate<typename _Tp> class SparseMat_;\n\nclass CV_EXPORTS MatConstIterator;\nclass CV_EXPORTS SparseMatIterator;\nclass CV_EXPORTS SparseMatConstIterator;\ntemplate<typename _Tp> class MatIterator_;\ntemplate<typename _Tp> class MatConstIterator_;\ntemplate<typename _Tp> class SparseMatIterator_;\ntemplate<typename _Tp> class SparseMatConstIterator_;\n\nnamespace ogl\n{\n    class CV_EXPORTS Buffer;\n    class CV_EXPORTS Texture2D;\n    class CV_EXPORTS Arrays;\n}\n\nnamespace cuda\n{\n    class CV_EXPORTS GpuMat;\n    class CV_EXPORTS HostMem;\n    class CV_EXPORTS Stream;\n    class CV_EXPORTS Event;\n}\n\nnamespace cudev\n{\n    template <typename _Tp> class GpuMat_;\n}\n\nnamespace ipp\n{\nCV_EXPORTS int getIppFeatures();\nCV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL,\n                             int line = 0);\nCV_EXPORTS int getIppStatus();\nCV_EXPORTS String getIppErrorLocation();\nCV_EXPORTS bool useIPP();\nCV_EXPORTS void setUseIPP(bool flag);\n\n} // ipp\n\n//! @endcond\n\n//! @} core_utils\n\n\n\n\n} // cv\n\n#include \"opencv2/core/neon_utils.hpp\"\n\n#endif //__OPENCV_CORE_BASE_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/bufferpool.hpp",
    "content": "// This file is part of OpenCV project.\n// It is subject to the license terms in the LICENSE file found in the top-level directory\n// of this distribution and at http://opencv.org/license.html.\n//\n// Copyright (C) 2014, Advanced Micro Devices, Inc., all rights reserved.\n\n#ifndef __OPENCV_CORE_BUFFER_POOL_HPP__\n#define __OPENCV_CORE_BUFFER_POOL_HPP__\n\nnamespace cv\n{\n\n//! @addtogroup core\n//! @{\n\nclass BufferPoolController\n{\nprotected:\n    ~BufferPoolController() { }\npublic:\n    virtual size_t getReservedSize() const = 0;\n    virtual size_t getMaxReservedSize() const = 0;\n    virtual void setMaxReservedSize(size_t size) = 0;\n    virtual void freeAllReservedBuffers() = 0;\n};\n\n//! @}\n\n}\n\n#endif // __OPENCV_CORE_BUFFER_POOL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/core.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/core.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/core_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n\n#ifndef __OPENCV_CORE_C_H__\n#define __OPENCV_CORE_C_H__\n\n#include \"opencv2/core/types_c.h\"\n\n#ifdef __cplusplus\n#  ifdef _MSC_VER\n/* disable warning C4190: 'function' has C-linkage specified, but returns UDT 'typename'\n                          which is incompatible with C\n\n   It is OK to disable it because we only extend few plain structures with\n   C++ construrtors for simpler interoperability with C++ API of the library\n*/\n#    pragma warning(disable:4190)\n#  elif defined __clang__ && __clang_major__ >= 3\n#    pragma GCC diagnostic ignored \"-Wreturn-type-c-linkage\"\n#  endif\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/** @addtogroup core_c\n    @{\n*/\n\n/****************************************************************************************\\\n*          Array allocation, deallocation, initialization and access to elements         *\n\\****************************************************************************************/\n\n/** `malloc` wrapper.\n   If there is no enough memory, the function\n   (as well as other OpenCV functions that call cvAlloc)\n   raises an error. 
*/\nCVAPI(void*)  cvAlloc( size_t size );\n\n/** `free` wrapper.\n   Here and further all the memory releasing functions\n   (that all call cvFree) take a double pointer in order\n   to clear the pointer to the data after releasing it.\n   Passing a pointer to a NULL pointer is OK: nothing happens in this case\n*/\nCVAPI(void)   cvFree_( void* ptr );\n#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0)\n\n/** @brief Creates an image header but does not allocate the image data.\n\n@param size Image width and height\n@param depth Image depth (see cvCreateImage )\n@param channels Number of channels (see cvCreateImage )\n */\nCVAPI(IplImage*)  cvCreateImageHeader( CvSize size, int depth, int channels );\n\n/** @brief Initializes an image header that was previously allocated.\n\nThe returned IplImage\\* points to the initialized header.\n@param image Image header to initialize\n@param size Image width and height\n@param depth Image depth (see cvCreateImage )\n@param channels Number of channels (see cvCreateImage )\n@param origin Top-left IPL_ORIGIN_TL or bottom-left IPL_ORIGIN_BL\n@param align Alignment for image rows, typically 4 or 8 bytes\n */\nCVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth,\n                                   int channels, int origin CV_DEFAULT(0),\n                                   int align CV_DEFAULT(4));\n\n/** @brief Creates an image header and allocates the image data.\n\nThis function call is equivalent to the following code:\n@code\n    header = cvCreateImageHeader(size, depth, channels);\n    cvCreateData(header);\n@endcode\n@param size Image width and height\n@param depth Bit depth of image elements. See IplImage for valid depths.\n@param channels Number of channels per pixel. See IplImage for details. This function only creates\nimages with interleaved channels.\n */\nCVAPI(IplImage*)  cvCreateImage( CvSize size, int depth, int channels );\n\n/** @brief Deallocates an image header.\n\nThis call is an analogue of:\n@code\n    if(image )\n    {\n        iplDeallocate(*image, IPL_IMAGE_HEADER | IPL_IMAGE_ROI);\n        *image = 0;\n    }\n@endcode\nbut it does not use IPL functions by default (see the CV_TURN_ON_IPL_COMPATIBILITY macro).\n@param image Double pointer to the image header\n */\nCVAPI(void)  cvReleaseImageHeader( IplImage** image );\n\n/** @brief Deallocates the image header and the image data.\n\nThis call is a shortened form of:\n@code\n    if(*image )\n    {\n        cvReleaseData(*image);\n        cvReleaseImageHeader(image);\n    }\n@endcode\n@param image Double pointer to the image header\n*/\nCVAPI(void)  cvReleaseImage( IplImage** image );\n\n/** Creates a copy of IPL image (widthStep may differ) */\nCVAPI(IplImage*) cvCloneImage( const IplImage* image );\n\n/** @brief Sets the channel of interest in an IplImage.\n\nIf the ROI is set to NULL and the coi is *not* 0, the ROI is allocated. Most OpenCV functions do\n*not* support the COI setting, so to process an individual image/matrix channel one may copy (via\ncvCopy or cvSplit) the channel to a separate image/matrix, process it and then copy the result\nback (via cvCopy or cvMerge) if needed.\n@param image A pointer to the image header\n@param coi The channel of interest. 0 - all channels are selected, 1 - first channel is selected,\netc. Note that the channel indices become 1-based.\n */\nCVAPI(void)  cvSetImageCOI( IplImage* image, int coi );\n\n/** @brief Returns the index of the channel of interest.\n\nReturns the channel of interest in an IplImage. 
Returned values correspond to the coi in\ncvSetImageCOI.\n@param image A pointer to the image header\n */\nCVAPI(int)  cvGetImageCOI( const IplImage* image );\n\n/** @brief Sets an image Region Of Interest (ROI) for a given rectangle.\n\nIf the original image ROI was NULL and the rect is not the whole image, the ROI structure is\nallocated.\n\nMost OpenCV functions support the use of ROI and treat the image rectangle as a separate image. For\nexample, all of the pixel coordinates are counted from the top-left (or bottom-left) corner of the\nROI, not the original image.\n@param image A pointer to the image header\n@param rect The ROI rectangle\n */\nCVAPI(void)  cvSetImageROI( IplImage* image, CvRect rect );\n\n/** @brief Resets the image ROI to include the entire image and releases the ROI structure.\n\nThis produces a similar result to the following, but in addition it releases the ROI structure. :\n@code\n    cvSetImageROI(image, cvRect(0, 0, image->width, image->height ));\n    cvSetImageCOI(image, 0);\n@endcode\n@param image A pointer to the image header\n */\nCVAPI(void)  cvResetImageROI( IplImage* image );\n\n/** @brief Returns the image ROI.\n\nIf there is no ROI set, cvRect(0,0,image-\\>width,image-\\>height) is returned.\n@param image A pointer to the image header\n */\nCVAPI(CvRect) cvGetImageROI( const IplImage* image );\n\n/** @brief Creates a matrix header but does not allocate the matrix data.\n\nThe function allocates a new matrix header and returns a pointer to it. The matrix data can then be\nallocated using cvCreateData or set explicitly to user-allocated data via cvSetData.\n@param rows Number of rows in the matrix\n@param cols Number of columns in the matrix\n@param type Type of the matrix elements, see cvCreateMat\n */\nCVAPI(CvMat*)  cvCreateMatHeader( int rows, int cols, int type );\n\n#define CV_AUTOSTEP  0x7fffffff\n\n/** @brief Initializes a pre-allocated matrix header.\n\nThis function is often used to process raw data with OpenCV matrix functions. For example, the\nfollowing code computes the matrix product of two matrices, stored as ordinary arrays:\n@code\n    double a[] = { 1, 2, 3, 4,\n                   5, 6, 7, 8,\n                   9, 10, 11, 12 };\n\n    double b[] = { 1, 5, 9,\n                   2, 6, 10,\n                   3, 7, 11,\n                   4, 8, 12 };\n\n    double c[9];\n    CvMat Ma, Mb, Mc ;\n\n    cvInitMatHeader(&Ma, 3, 4, CV_64FC1, a);\n    cvInitMatHeader(&Mb, 4, 3, CV_64FC1, b);\n    cvInitMatHeader(&Mc, 3, 3, CV_64FC1, c);\n\n    cvMatMulAdd(&Ma, &Mb, 0, &Mc);\n    // the c array now contains the product of a (3x4) and b (4x3)\n@endcode\n@param mat A pointer to the matrix header to be initialized\n@param rows Number of rows in the matrix\n@param cols Number of columns in the matrix\n@param type Type of the matrix elements, see cvCreateMat .\n@param data Optional: data pointer assigned to the matrix header\n@param step Optional: full row width in bytes of the assigned data. 
By default, the minimal\npossible step is used, which assumes there are no gaps between subsequent rows of the matrix.\n */\nCVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols,\n                              int type, void* data CV_DEFAULT(NULL),\n                              int step CV_DEFAULT(CV_AUTOSTEP) );\n\n/** @brief Creates a matrix header and allocates the matrix data.\n\nThe function call is equivalent to the following code:\n@code\n    CvMat* mat = cvCreateMatHeader(rows, cols, type);\n    cvCreateData(mat);\n@endcode\n@param rows Number of rows in the matrix\n@param cols Number of columns in the matrix\n@param type The type of the matrix elements in the form\nCV_\\<bit depth\\>\\<S|U|F\\>C\\<number of channels\\> , where S=signed, U=unsigned, F=float. For\nexample, CV_8UC1 means the elements are 8-bit unsigned and there is 1 channel, and\nCV_32SC2 means the elements are 32-bit signed and there are 2 channels.\n */\nCVAPI(CvMat*)  cvCreateMat( int rows, int cols, int type );\n\n/** @brief Deallocates a matrix.\n\nThe function decrements the matrix data reference counter and deallocates the matrix header. If the data\nreference counter is 0, it also deallocates the data:\n@code\n    if(*mat )\n        cvDecRefData(*mat);\n    cvFree((void**)mat);\n@endcode\n@param mat Double pointer to the matrix\n */\nCVAPI(void)  cvReleaseMat( CvMat** mat );\n\n/** @brief Decrements an array data reference counter.\n\nThe function decrements the data reference counter in a CvMat or CvMatND if the reference counter\npointer is not NULL. If the counter reaches zero, the data is deallocated. In the current\nimplementation the reference counter is not NULL only if the data was allocated using the\ncvCreateData function. The counter will be NULL in other cases such as: external data was assigned\nto the header using cvSetData, the header is part of a larger matrix or image, or the header was\nconverted from an image or n-dimensional matrix header.\n@param arr Pointer to an array header\n */\nCV_INLINE  void  cvDecRefData( CvArr* arr )\n{\n    if( CV_IS_MAT( arr ))\n    {\n        CvMat* mat = (CvMat*)arr;\n        mat->data.ptr = NULL;\n        if( mat->refcount != NULL && --*mat->refcount == 0 )\n            cvFree( &mat->refcount );\n        mat->refcount = NULL;\n    }\n    else if( CV_IS_MATND( arr ))\n    {\n        CvMatND* mat = (CvMatND*)arr;\n        mat->data.ptr = NULL;\n        if( mat->refcount != NULL && --*mat->refcount == 0 )\n            cvFree( &mat->refcount );\n        mat->refcount = NULL;\n    }\n}\n\n/** @brief Increments array data reference counter.\n\nThe function increments CvMat or CvMatND data reference counter and returns the new counter value if\nthe reference counter pointer is not NULL, otherwise it returns zero.\n@param arr Array header\n */\nCV_INLINE  int  cvIncRefData( CvArr* arr )\n{\n    int refcount = 0;\n    if( CV_IS_MAT( arr ))\n    {\n        CvMat* mat = (CvMat*)arr;\n        if( mat->refcount != NULL )\n            refcount = ++*mat->refcount;\n    }\n    else if( CV_IS_MATND( arr ))\n    {\n        CvMatND* mat = (CvMatND*)arr;\n        if( mat->refcount != NULL )\n            refcount = ++*mat->refcount;\n    }\n    return refcount;\n}\n\n\n/** Creates an exact copy of the input matrix (except, maybe, the step value) */\nCVAPI(CvMat*) cvCloneMat( const CvMat* mat );\n\n\n/** @brief Returns matrix header corresponding to the rectangular sub-array of input image or matrix.\n\nThe function returns the header corresponding to a specified 
rectangle of the input array. In other words, it allows the user to treat a rectangular part of the input array as a stand-alone array. ROI is\ntaken into account by the function so the sub-array of the ROI is actually extracted.\n@param arr Input array\n@param submat Pointer to the resultant sub-array header\n@param rect Zero-based coordinates of the rectangle of interest\n */\nCVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect );\n#define cvGetSubArr cvGetSubRect\n\n/** @brief Returns array row or row span.\n\nThe functions return the header corresponding to a specified row/row span of the input array.\ncvGetRow(arr, submat, row) is a shortcut for cvGetRows(arr, submat, row, row+1).\n@param arr Input array\n@param submat Pointer to the resulting sub-array header\n@param start_row Zero-based index of the starting row (inclusive) of the span\n@param end_row Zero-based index of the ending row (exclusive) of the span\n@param delta_row Index step in the row span. That is, the function extracts every delta_row-th\nrow from start_row and up to (but not including) end_row.\n */\nCVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat,\n                        int start_row, int end_row,\n                        int delta_row CV_DEFAULT(1));\n\n/** @overload\n@param arr Input array\n@param submat Pointer to the resulting sub-array header\n@param row Zero-based index of the selected row\n*/\nCV_INLINE  CvMat*  cvGetRow( const CvArr* arr, CvMat* submat, int row )\n{\n    return cvGetRows( arr, submat, row, row + 1, 1 );\n}\n\n\n/** @brief Returns one or more array columns.\n\nThe functions return the header corresponding to a specified column span of the input array. That is, no data is copied. Therefore, any modifications of the submatrix will affect the original array.\nIf you need to copy the columns, use cvCloneMat. cvGetCol(arr, submat, col) is a shortcut for\ncvGetCols(arr, submat, col, col+1).\n@param arr Input array\n@param submat Pointer to the resulting sub-array header\n@param start_col Zero-based index of the starting column (inclusive) of the span\n@param end_col Zero-based index of the ending column (exclusive) of the span\n */\nCVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat,\n                        int start_col, int end_col );\n\n/** @overload\n@param arr Input array\n@param submat Pointer to the resulting sub-array header\n@param col Zero-based index of the selected column\n*/\nCV_INLINE  CvMat*  cvGetCol( const CvArr* arr, CvMat* submat, int col )\n{\n    return cvGetCols( arr, submat, col, col + 1 );\n}\n\n/** @brief Returns one of the array diagonals.\n\nThe function returns the header corresponding to a specified diagonal of the input array.\n@param arr Input array\n@param submat Pointer to the resulting sub-array header\n@param diag Index of the array diagonal. 
Zero value corresponds to the main diagonal, -1\ncorresponds to the diagonal above the main, 1 corresponds to the diagonal below the main, and so\nforth.\n */\nCVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat,\n                            int diag CV_DEFAULT(0));\n\n/** low-level scalar <-> raw data conversion functions */\nCVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type,\n                              int extend_to_12 CV_DEFAULT(0) );\n\nCVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar );\n\n/** @brief Creates a new matrix header but does not allocate the matrix data.\n\nThe function allocates a header for a multi-dimensional dense array. The array data can further be\nallocated using cvCreateData or set explicitly to user-allocated data via cvSetData.\n@param dims Number of array dimensions\n@param sizes Array of dimension sizes\n@param type Type of array elements, see cvCreateMat\n */\nCVAPI(CvMatND*)  cvCreateMatNDHeader( int dims, const int* sizes, int type );\n\n/** @brief Creates the header and allocates the data for a multi-dimensional dense array.\n\nThis function call is equivalent to the following code:\n@code\n    CvMatND* mat = cvCreateMatNDHeader(dims, sizes, type);\n    cvCreateData(mat);\n@endcode\n@param dims Number of array dimensions. This must not exceed CV_MAX_DIM (32 by default, but can be\nchanged at build time).\n@param sizes Array of dimension sizes.\n@param type Type of array elements, see cvCreateMat .\n */\nCVAPI(CvMatND*)  cvCreateMatND( int dims, const int* sizes, int type );\n\n/** @brief Initializes a pre-allocated multi-dimensional array header.\n\n@param mat A pointer to the array header to be initialized\n@param dims The number of array dimensions\n@param sizes An array of dimension sizes\n@param type Type of array elements, see cvCreateMat\n@param data Optional data pointer assigned to the matrix header\n */\nCVAPI(CvMatND*)  cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes,\n                                    int type, void* data CV_DEFAULT(NULL) );\n\n/** @brief Deallocates a multi-dimensional array.\n\nThe function decrements the array data reference counter and releases the array header. If the\nreference counter reaches 0, it also deallocates the data. :\n@code\n    if(*mat )\n        cvDecRefData(*mat);\n    cvFree((void**)mat);\n@endcode\n@param mat Double pointer to the array\n */\nCV_INLINE  void  cvReleaseMatND( CvMatND** mat )\n{\n    cvReleaseMat( (CvMat**)mat );\n}\n\n/** Creates a copy of CvMatND (except, may be, steps) */\nCVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat );\n\n/** @brief Creates sparse array.\n\nThe function allocates a multi-dimensional sparse array. Initially the array contain no elements,\nthat is PtrND and other related functions will return 0 for every index.\n@param dims Number of array dimensions. In contrast to the dense matrix, the number of dimensions is\npractically unlimited (up to \\f$2^{16}\\f$ ).\n@param sizes Array of dimension sizes\n@param type Type of array elements. 
The same as for CvMat\n */\nCVAPI(CvSparseMat*)  cvCreateSparseMat( int dims, const int* sizes, int type );\n\n/** @brief Deallocates sparse array.\n\nThe function releases the sparse array and clears the array pointer upon exit.\n@param mat Double pointer to the array\n */\nCVAPI(void)  cvReleaseSparseMat( CvSparseMat** mat );\n\n/** Creates a copy of CvSparseMat (except, maybe, zero items) */\nCVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat );\n\n/** @brief Initializes sparse array elements iterator.\n\nThe function initializes the iterator of sparse array elements and returns a pointer to the first element,\nor NULL if the array is empty.\n@param mat Input array\n@param mat_iterator Initialized iterator\n */\nCVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat,\n                                              CvSparseMatIterator* mat_iterator );\n\n/** @brief Returns the next sparse matrix element\n\nThe function moves the iterator to the next sparse matrix element and returns a pointer to it. In the\ncurrent version there is no particular order of the elements, because they are stored in the\nhash table. The sample below demonstrates how to iterate through the sparse matrix:\n@code\n    // print all the non-zero sparse matrix elements and compute their sum\n    double sum = 0;\n    int i, dims = cvGetDims(sparsemat);\n    CvSparseMatIterator it;\n    CvSparseNode* node = cvInitSparseMatIterator(sparsemat, &it);\n\n    for(; node != 0; node = cvGetNextSparseNode(&it))\n    {\n        int* idx = CV_NODE_IDX(sparsemat, node);\n        float val = *(float*)CV_NODE_VAL(sparsemat, node);\n        printf(\"M\");\n        for(i = 0; i < dims; i++ )\n            printf(\"[%d]\", idx[i]);\n        printf(\"=%g\\n\", val);\n\n        sum += val;\n    }\n\n    printf(\"\\nTotal sum = %g\\n\", sum);\n@endcode\n@param mat_iterator Sparse array iterator\n */\nCV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator )\n{\n    if( mat_iterator->node->next )\n        return mat_iterator->node = mat_iterator->node->next;\n    else\n    {\n        int idx;\n        for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ )\n        {\n            CvSparseNode* node = (CvSparseNode*)mat_iterator->mat->hashtable[idx];\n            if( node )\n            {\n                mat_iterator->curidx = idx;\n                return mat_iterator->node = node;\n            }\n        }\n        return NULL;\n    }\n}\n\n\n#define CV_MAX_ARR 10\n\n/** matrix iterator: used for n-ary operations on dense arrays */\ntypedef struct CvNArrayIterator\n{\n    int count; /**< number of arrays */\n    int dims; /**< number of dimensions to iterate */\n    CvSize size; /**< maximal common linear size: { width = size, height = 1 } */\n    uchar* ptr[CV_MAX_ARR]; /**< pointers to the array slices */\n    int stack[CV_MAX_DIM]; /**< for internal use */\n    CvMatND* hdr[CV_MAX_ARR]; /**< pointers to the headers of the\n                                 matrices that are processed */\n}\nCvNArrayIterator;\n\n#define CV_NO_DEPTH_CHECK     1\n#define CV_NO_CN_CHECK        2\n#define CV_NO_SIZE_CHECK      4\n\n/** initializes iterator that traverses through several arrays simultaneously\n   (the function together with cvNextNArraySlice is used for\n    N-ary element-wise operations) */\nCVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs,\n                                 const CvArr* mask, CvMatND* stubs,\n                                 CvNArrayIterator* array_iterator,\n       
                          int flags CV_DEFAULT(0) );\n\n/** returns zero value if iteration is finished, non-zero (slice length) otherwise */\nCVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator );\n\n\n/** @brief Returns type of array elements.\n\nThe function returns the type of the array elements. In the case of IplImage the type is converted to\na CvMat-like representation. For example, if the image has been created as:\n@code\n    IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);\n@endcode\nThe code cvGetElemType(img) will return CV_8UC3.\n@param arr Input array\n */\nCVAPI(int) cvGetElemType( const CvArr* arr );\n\n/** @brief Return number of array dimensions\n\nThe function returns the array dimensionality and the array of dimension sizes. In the case of\nIplImage or CvMat it always returns 2 regardless of the number of image/matrix rows. For example, the\nfollowing code calculates the total number of array elements:\n@code\n    int sizes[CV_MAX_DIM];\n    int i, total = 1;\n    int dims = cvGetDims(arr, sizes);\n    for(i = 0; i < dims; i++ )\n        total *= sizes[i];\n@endcode\n@param arr Input array\n@param sizes Optional output vector of the array dimension sizes. For 2d arrays the number of rows\n(height) goes first, number of columns (width) next.\n */\nCVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) );\n\n\n/** @brief Returns array size along the specified dimension.\n\n@param arr Input array\n@param index Zero-based dimension index (for matrices 0 means number of rows, 1 means number of\ncolumns; for images 0 means height, 1 means width)\n */\nCVAPI(int) cvGetDimSize( const CvArr* arr, int index );\n\n\n/** @brief Return pointer to a particular array element.\n\nThe functions return a pointer to a specific array element. The number of array dimensions should match\nthe number of indices passed to the function, except for the cvPtr1D function, which can be used for\nsequential access to 1D, 2D or nD dense arrays.\n\nThe functions can be used for sparse arrays as well - if the requested node does not exist they\ncreate it and set it to zero.\n\nAll these as well as other functions accessing array elements (cvGetND, cvGetRealND, cvSet,\ncvSetND, cvSetRealND) raise an error if the element index is out of range.\n@param arr Input array\n@param idx0 The first zero-based component of the element index\n@param type Optional output parameter: type of matrix elements\n */\nCVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL));\n/** @overload */\nCVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) );\n/** @overload */\nCVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2,\n                      int* type CV_DEFAULT(NULL));\n/** @overload\n@param arr Input array\n@param idx Array of the element indices\n@param type Optional output parameter: type of matrix elements\n@param create_node Optional input parameter for sparse matrices. Non-zero value of the parameter\nmeans that the requested element is created if it does not exist already.\n@param precalc_hashval Optional input parameter for sparse matrices. 
If the pointer is not NULL,\nthe function does not recalculate the node hash value, but takes it from the specified location.\nIt is useful for speeding up pair-wise operations (TODO: provide an example)\n*/\nCVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL),\n                      int create_node CV_DEFAULT(1),\n                      unsigned* precalc_hashval CV_DEFAULT(NULL));\n\n/** @brief Return a specific array element.\n\nThe functions return a specific array element. In the case of a sparse array the functions return 0\nif the requested node does not exist (no new node is created by the functions).\n@param arr Input array\n@param idx0 The first zero-based component of the element index\n */\nCVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 );\n/** @overload */\nCVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 );\n/** @overload */\nCVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 );\n/** @overload\n@param arr Input array\n@param idx Array of the element indices\n*/\nCVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx );\n\n/** @brief Return a specific element of single-channel 1D, 2D, 3D or nD array.\n\nReturns a specific element of a single-channel array. If the array has multiple channels, a runtime\nerror is raised. Note that Get?D functions can be used safely for both single-channel and\nmultiple-channel arrays though they are a bit slower.\n\nIn the case of a sparse array the functions return 0 if the requested node does not exist (no new\nnode is created by the functions).\n@param arr Input array. Must have a single channel.\n@param idx0 The first zero-based component of the element index\n */\nCVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 );\n/** @overload */\nCVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 );\n/** @overload */\nCVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 );\n/** @overload\n@param arr Input array. Must have a single channel.\n@param idx Array of the element indices\n*/\nCVAPI(double) cvGetRealND( const CvArr* arr, const int* idx );\n\n/** @brief Change the particular array element.\n\nThe functions assign the new value to a particular array element. In the case of a sparse array the\nfunctions create the node if it does not exist yet.\n@param arr Input array\n@param idx0 The first zero-based component of the element index\n@param value The assigned value\n */\nCVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value );\n/** @overload */\nCVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value );\n/** @overload */\nCVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value );\n/** @overload\n@param arr Input array\n@param idx Array of the element indices\n@param value The assigned value\n*/\nCVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value );\n\n/** @brief Change a specific array element.\n\nThe functions assign a new value to a specific element of a single-channel array. If the array has\nmultiple channels, a runtime error is raised. 
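For instance (an illustrative sketch, not part of the upstream documentation):\n@code\n    CvMat* m = cvCreateMat(3, 3, CV_32FC1);   // single channel, so the Real variants apply\n    cvSetReal2D(m, 0, 1, 3.5);                // sets the element at row 0, column 1\n    cvSet2D(m, 0, 2, cvScalar(7));            // generic Set*D equivalent of the line above\n    cvReleaseMat(&m);\n@endcode\n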
Note that the Set\\*D function can be used safely for\nboth single-channel and multiple-channel arrays, though they are a bit slower.\n\nIn the case of a sparse array the functions create the node if it does not yet exist.\n@param arr Input array\n@param idx0 The first zero-based component of the element index\n@param value The assigned value\n */\nCVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value );\n/** @overload */\nCVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value );\n/** @overload */\nCVAPI(void) cvSetReal3D( CvArr* arr, int idx0,\n                        int idx1, int idx2, double value );\n/** @overload\n@param arr Input array\n@param idx Array of the element indices\n@param value The assigned value\n*/\nCVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value );\n\n/** clears element of ND dense array,\n   in case of sparse arrays it deletes the specified node */\nCVAPI(void) cvClearND( CvArr* arr, const int* idx );\n\n/** @brief Returns matrix header for arbitrary array.\n\nThe function returns a matrix header for the input array that can be a matrix - CvMat, an image -\nIplImage, or a multi-dimensional dense array - CvMatND (the third option is allowed only if\nallowND != 0) . In the case of matrix the function simply returns the input pointer. In the case of\nIplImage\\* or CvMatND it initializes the header structure with parameters of the current image ROI\nand returns &header. Because COI is not supported by CvMat, it is returned separately.\n\nThe function provides an easy way to handle both types of arrays - IplImage and CvMat using the same\ncode. Input array must have non-zero data pointer, otherwise the function will report an error.\n\n@note If the input array is IplImage with planar data layout and COI set, the function returns the\npointer to the selected plane and COI == 0. This feature allows user to process IplImage structures\nwith planar data layout, even though OpenCV does not support such images.\n@param arr Input array\n@param header Pointer to CvMat structure used as a temporary buffer\n@param coi Optional output parameter for storing COI\n@param allowND If non-zero, the function accepts multi-dimensional dense arrays (CvMatND\\*) and\nreturns 2D matrix (if CvMatND has two dimensions) or 1D matrix (when CvMatND has 1 dimension or\nmore than 2 dimensions). The CvMatND array must be continuous.\n@sa cvGetImage, cvarrToMat.\n */\nCVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header,\n                       int* coi CV_DEFAULT(NULL),\n                       int allowND CV_DEFAULT(0));\n\n/** @brief Returns image header for arbitrary array.\n\nThe function returns the image header for the input array that can be a matrix (CvMat) or image\n(IplImage). In the case of an image the function simply returns the input pointer. In the case of\nCvMat it initializes an image_header structure with the parameters of the input matrix. 
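For example (an illustrative sketch, not part of the upstream documentation):\n@code\n    CvMat* mat = cvCreateMat(240, 320, CV_8UC3);\n    IplImage hdr;\n    IplImage* img = cvGetImage(mat, &hdr);   // img points to hdr and shares mat's data\n@endcode\n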
Note that\nif we transform IplImage to CvMat using cvGetMat and then transform CvMat back to IplImage using\nthis function, we will get different headers if the ROI is set in the original image.\n@param arr Input array\n@param image_header Pointer to IplImage structure used as a temporary buffer\n */\nCVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header );\n\n\n/** @brief Changes the shape of a multi-dimensional array without copying the data.\n\nThe function is an advanced version of cvReshape that can work with multi-dimensional arrays as\nwell (though it can work with ordinary images and matrices) and change the number of dimensions.\n\nBelow are the two samples from the cvReshape description rewritten using cvReshapeMatND:\n@code\n    IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);\n    IplImage gray_img_hdr, *gray_img;\n    gray_img = (IplImage*)cvReshapeMatND(color_img, sizeof(gray_img_hdr), &gray_img_hdr, 1, 0, 0);\n    ...\n    int size[] = { 2, 2, 2 };\n    CvMatND* mat = cvCreateMatND(3, size, CV_32F);\n    CvMat row_header, *row;\n    row = (CvMat*)cvReshapeMatND(mat, sizeof(row_header), &row_header, 0, 1, 0);\n@endcode\nIn C, the header file for this function includes a convenient macro cvReshapeND that does away with\nthe sizeof_header parameter. So, the lines containing the call to cvReshapeMatND in the examples\nmay be replaced as follows:\n@code\n    gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);\n    ...\n    row = (CvMat*)cvReshapeND(mat, &row_header, 0, 1, 0);\n@endcode\n@param arr Input array\n@param sizeof_header Size of output header to distinguish between IplImage, CvMat and CvMatND\noutput headers\n@param header Output header to be filled\n@param new_cn New number of channels. new_cn = 0 means that the number of channels remains\nunchanged.\n@param new_dims New number of dimensions. new_dims = 0 means that the number of dimensions\nremains the same.\n@param new_sizes Array of new dimension sizes. Only new_dims-1 values are used, because the\ntotal number of elements must remain the same. Thus, if new_dims = 1, new_sizes array is not\nused.\n */\nCVAPI(CvArr*) cvReshapeMatND( const CvArr* arr,\n                             int sizeof_header, CvArr* header,\n                             int new_cn, int new_dims, int* new_sizes );\n\n#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes )   \\\n      cvReshapeMatND( (arr), sizeof(*(header)), (header),         \\\n                      (new_cn), (new_dims), (new_sizes))\n\n/** @brief Changes shape of matrix/image without copying data.\n\nThe function initializes the CvMat header so that it points to the same data as the original array\nbut has a different shape - different number of channels, different number of rows, or both.\n\nThe following example code creates one image buffer and two image headers, the first is for a\n320x240x3 image and the second is for a 960x240x1 image:\n@code\n    IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);\n    CvMat gray_mat_hdr;\n    IplImage gray_img_hdr, *gray_img;\n    cvReshape(color_img, &gray_mat_hdr, 1);\n    gray_img = cvGetImage(&gray_mat_hdr, &gray_img_hdr);\n@endcode\nAnd the next example converts a 3x3 matrix to a single 1x9 vector:\n@code\n    CvMat* mat = cvCreateMat(3, 3, CV_32F);\n    CvMat row_header, *row;\n    row = cvReshape(mat, &row_header, 0, 1);\n@endcode\n@param arr Input array\n@param header Output header to be filled\n@param new_cn New number of channels. 
'new_cn = 0' means that the number of channels remains\nunchanged.\n@param new_rows New number of rows. 'new_rows = 0' means that the number of rows remains\nunchanged unless it needs to be changed according to the new_cn value.\n*/\nCVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header,\n                        int new_cn, int new_rows CV_DEFAULT(0) );\n\n/** Repeats source 2d array several times in both horizontal and\n   vertical direction to fill destination array */\nCVAPI(void) cvRepeat( const CvArr* src, CvArr* dst );\n\n/** @brief Allocates array data\n\nThe function allocates image, matrix or multi-dimensional dense array data. Note that in the case of\nmatrix types OpenCV allocation functions are used. In the case of IplImage they are used unless\nCV_TURN_ON_IPL_COMPATIBILITY() has been called before. In the latter case IPL functions are used\nto allocate the data.\n@param arr Array header\n */\nCVAPI(void)  cvCreateData( CvArr* arr );\n\n/** @brief Releases array data.\n\nThe function releases the array data. In the case of CvMat or CvMatND it simply calls\ncvDecRefData(), that is, the function cannot deallocate external data. See also the note to\ncvCreateData.\n@param arr Array header\n */\nCVAPI(void)  cvReleaseData( CvArr* arr );\n\n/** @brief Assigns user data to the array header.\n\nThe function assigns user data to the array header. The header should be initialized beforehand using\ncvCreateMatHeader, cvCreateImageHeader, cvCreateMatNDHeader, cvInitMatHeader,\ncvInitImageHeader or cvInitMatNDHeader.\n@param arr Array header\n@param data User data\n@param step Full row length in bytes\n */\nCVAPI(void)  cvSetData( CvArr* arr, void* data, int step );\n\n/** @brief Retrieves low-level information about the array.\n\nThe function fills output variables with low-level information about the array data. All output\nparameters are optional, so some of the pointers may be set to NULL. If the array is IplImage with\nROI set, the parameters of ROI are returned.\n\nThe following example shows how to get access to array elements. It computes absolute values of the\narray elements:\n@code\n    float* data;\n    int step;\n    CvSize size;\n\n    cvGetRawData(array, (uchar**)&data, &step, &size);\n    step /= sizeof(data[0]);\n\n    for(int y = 0; y < size.height; y++, data += step )\n        for(int x = 0; x < size.width; x++ )\n            data[x] = (float)fabs(data[x]);\n@endcode\n@param arr Array header\n@param data Output pointer to the whole image origin or ROI origin if ROI is set\n@param step Output full row length in bytes\n@param roi_size Output ROI size\n */\nCVAPI(void) cvGetRawData( const CvArr* arr, uchar** data,\n                         int* step CV_DEFAULT(NULL),\n                         CvSize* roi_size CV_DEFAULT(NULL));\n\n/** @brief Returns size of matrix or image ROI.\n\nThe function returns number of rows (CvSize::height) and number of columns (CvSize::width) of the\ninput matrix or image. In the case of image the size of ROI is returned.\n@param arr array header\n */\nCVAPI(CvSize) cvGetSize( const CvArr* arr );\n\n/** @brief Copies one array to another.\n\nThe function copies selected elements from an input array to an output array:\n\n\\f[\\texttt{dst} (I)= \\texttt{src} (I)  \\quad \\text{if} \\quad \\texttt{mask} (I)  \\ne 0.\\f]\n\nIf any of the passed arrays is of IplImage type, then its ROI and COI fields are used. Both arrays\nmust have the same type, the same number of dimensions, and the same size. 
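For instance, a minimal sketch (the array names are illustrative):\n@code\n    cvCopy( src, dst, mask );  // dst(I) = src(I) wherever mask(I) != 0\n    cvCopy( src, dst, NULL );  // unconditional copy\n@endcode\n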
The function can also\ncopy sparse arrays (mask is not supported in this case).\n@param src The source array\n@param dst The destination array\n@param mask Operation mask, 8-bit single channel array; specifies elements of the destination array\nto be changed\n */\nCVAPI(void)  cvCopy( const CvArr* src, CvArr* dst,\n                     const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @brief Sets every element of an array to a given value.\n\nThe function copies the scalar value to every selected element of the destination array:\n\\f[\\texttt{arr} (I)= \\texttt{value} \\quad \\text{if} \\quad \\texttt{mask} (I)  \\ne 0\\f]\nIf array arr is of IplImage type, then its ROI is used, but COI must not be set.\n@param arr The destination array\n@param value Fill value\n@param mask Operation mask, 8-bit single channel array; specifies elements of the destination\narray to be changed\n */\nCVAPI(void)  cvSet( CvArr* arr, CvScalar value,\n                    const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @brief Clears the array.\n\nThe function clears the array. In the case of dense arrays (CvMat, CvMatND or IplImage),\ncvZero(array) is equivalent to cvSet(array,cvScalarAll(0),0). In the case of sparse arrays all the\nelements are removed.\n@param arr Array to be cleared\n */\nCVAPI(void)  cvSetZero( CvArr* arr );\n#define cvZero  cvSetZero\n\n\n/** Splits a multi-channel array into the set of single-channel arrays or\n   extracts particular [color] plane */\nCVAPI(void)  cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1,\n                      CvArr* dst2, CvArr* dst3 );\n\n/** Merges a set of single-channel arrays into the single multi-channel array\n   or inserts one particular [color] plane to the array */\nCVAPI(void)  cvMerge( const CvArr* src0, const CvArr* src1,\n                      const CvArr* src2, const CvArr* src3,\n                      CvArr* dst );\n\n/** Copies several channels from input arrays to\n   certain channels of output arrays */\nCVAPI(void)  cvMixChannels( const CvArr** src, int src_count,\n                            CvArr** dst, int dst_count,\n                            const int* from_to, int pair_count );\n\n/** @brief Converts one array to another with optional linear transformation.\n\nThe function has several different purposes, and thus has several different names. 
It copies one\narray to another with optional scaling, which is performed first, and/or optional type conversion,\nperformed after:\n\n\\f[\\texttt{dst} (I) =  \\texttt{scale} \\texttt{src} (I) + ( \\texttt{shift} _0, \\texttt{shift} _1,...)\\f]\n\nAll the channels of multi-channel arrays are processed independently.\n\nThe type of conversion is done with rounding and saturation, that is if the result of scaling +\nconversion can not be represented exactly by a value of the destination array element type, it is\nset to the nearest representable value on the real axis.\n@param src Source array\n@param dst Destination array\n@param scale Scale factor\n@param shift Value added to the scaled source array elements\n */\nCVAPI(void)  cvConvertScale( const CvArr* src, CvArr* dst,\n                             double scale CV_DEFAULT(1),\n                             double shift CV_DEFAULT(0) );\n#define cvCvtScale cvConvertScale\n#define cvScale  cvConvertScale\n#define cvConvert( src, dst )  cvConvertScale( (src), (dst), 1, 0 )\n\n\n/** Performs linear transformation on every source array element,\n   stores absolute value of the result:\n   dst(x,y,c) = abs(scale*src(x,y,c)+shift).\n   destination array must have 8u type.\n   In other cases one may use cvConvertScale + cvAbsDiffS */\nCVAPI(void)  cvConvertScaleAbs( const CvArr* src, CvArr* dst,\n                                double scale CV_DEFAULT(1),\n                                double shift CV_DEFAULT(0) );\n#define cvCvtScaleAbs  cvConvertScaleAbs\n\n\n/** checks termination criteria validity and\n   sets eps to default_eps (if it is not set),\n   max_iter to default_max_iters (if it is not set)\n*/\nCVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria,\n                                           double default_eps,\n                                           int default_max_iters );\n\n/****************************************************************************************\\\n*                   Arithmetic, logic and comparison operations                          *\n\\****************************************************************************************/\n\n/** dst(mask) = src1(mask) + src2(mask) */\nCVAPI(void)  cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst,\n                    const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(mask) = src(mask) + value */\nCVAPI(void)  cvAddS( const CvArr* src, CvScalar value, CvArr* dst,\n                     const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(mask) = src1(mask) - src2(mask) */\nCVAPI(void)  cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst,\n                    const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(mask) = src(mask) - value = src(mask) + (-value) */\nCV_INLINE  void  cvSubS( const CvArr* src, CvScalar value, CvArr* dst,\n                         const CvArr* mask CV_DEFAULT(NULL))\n{\n    cvAddS( src, cvScalar( -value.val[0], -value.val[1], -value.val[2], -value.val[3]),\n            dst, mask );\n}\n\n/** dst(mask) = value - src(mask) */\nCVAPI(void)  cvSubRS( const CvArr* src, CvScalar value, CvArr* dst,\n                      const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(idx) = src1(idx) * src2(idx) * scale\n   (scaled element-wise multiplication of 2 arrays) */\nCVAPI(void)  cvMul( const CvArr* src1, const CvArr* src2,\n                    CvArr* dst, double scale CV_DEFAULT(1) );\n\n/** element-wise division/inversion with scaling:\n    dst(idx) = src1(idx) * scale / src2(idx)\n    or dst(idx) = scale / src2(idx) if src1 == 0 
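\n    e.g. cvDiv( NULL, B, C, 1. ) computes C(idx) = 1./B(idx)  (B and C are illustrative names) 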
*/\nCVAPI(void)  cvDiv( const CvArr* src1, const CvArr* src2,\n                    CvArr* dst, double scale CV_DEFAULT(1));\n\n/** dst = src1 * scale + src2 */\nCVAPI(void)  cvScaleAdd( const CvArr* src1, CvScalar scale,\n                         const CvArr* src2, CvArr* dst );\n#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C)\n\n/** dst = src1 * alpha + src2 * beta + gamma */\nCVAPI(void)  cvAddWeighted( const CvArr* src1, double alpha,\n                            const CvArr* src2, double beta,\n                            double gamma, CvArr* dst );\n\n/** @brief Calculates the dot product of two arrays in Euclidean metrics.\n\nThe function calculates and returns the Euclidean dot product of two arrays.\n\n\\f[src1  \\bullet src2 =  \\sum _I ( \\texttt{src1} (I)  \\texttt{src2} (I))\\f]\n\nIn the case of multiple channel arrays, the results for all channels are accumulated. In particular,\ncvDotProduct(a,a), where a is a complex vector, will return \\f$||\\texttt{a}||^2\\f$. The function can\nprocess multi-dimensional arrays, row by row, layer by layer, and so on.\n@param src1 The first source array\n@param src2 The second source array\n */\nCVAPI(double)  cvDotProduct( const CvArr* src1, const CvArr* src2 );\n\n/** dst(idx) = src1(idx) & src2(idx) */\nCVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2,\n                  CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(idx) = src(idx) & value */\nCVAPI(void) cvAndS( const CvArr* src, CvScalar value,\n                   CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(idx) = src1(idx) | src2(idx) */\nCVAPI(void) cvOr( const CvArr* src1, const CvArr* src2,\n                 CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(idx) = src(idx) | value */\nCVAPI(void) cvOrS( const CvArr* src, CvScalar value,\n                  CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(idx) = src1(idx) ^ src2(idx) */\nCVAPI(void) cvXor( const CvArr* src1, const CvArr* src2,\n                  CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(idx) = src(idx) ^ value */\nCVAPI(void) cvXorS( const CvArr* src, CvScalar value,\n                   CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));\n\n/** dst(idx) = ~src(idx) */\nCVAPI(void) cvNot( const CvArr* src, CvArr* dst );\n\n/** dst(idx) = lower(idx) <= src(idx) < upper(idx) */\nCVAPI(void) cvInRange( const CvArr* src, const CvArr* lower,\n                      const CvArr* upper, CvArr* dst );\n\n/** dst(idx) = lower <= src(idx) < upper */\nCVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower,\n                       CvScalar upper, CvArr* dst );\n\n#define CV_CMP_EQ   0\n#define CV_CMP_GT   1\n#define CV_CMP_GE   2\n#define CV_CMP_LT   3\n#define CV_CMP_LE   4\n#define CV_CMP_NE   5\n\n/** The comparison operations support single-channel arrays only.\n   Destination image should be 8uC1 or 8sC1 */\n\n/** dst(idx) = src1(idx) _cmp_op_ src2(idx) */\nCVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op );\n\n/** dst(idx) = src(idx) _cmp_op_ value */\nCVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op );\n\n/** dst(idx) = min(src1(idx),src2(idx)) */\nCVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst );\n\n/** dst(idx) = max(src1(idx),src2(idx)) */\nCVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst );\n\n/** dst(idx) = min(src(idx),value) */\nCVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst );\n
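\n/* Usage sketch (illustrative, not part of the original documentation): clamp an\n   array A to [low, high] in-place with the min/max functions declared here:\n\n       cvMaxS( A, low,  A );   // A(idx) = max(A(idx), low)\n       cvMinS( A, high, A );   // A(idx) = min(A(idx), high)\n*/\n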
\n/** dst(idx) = max(src(idx),value) */\nCVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst );\n\n/** dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */\nCVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst );\n\n/** dst(x,y,c) = abs(src(x,y,c) - value(c)) */\nCVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value );\n#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0))\n\n/****************************************************************************************\\\n*                                Math operations                                         *\n\\****************************************************************************************/\n\n/** Does cartesian->polar coordinates conversion.\n   Either of output components (magnitude or angle) is optional */\nCVAPI(void)  cvCartToPolar( const CvArr* x, const CvArr* y,\n                            CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL),\n                            int angle_in_degrees CV_DEFAULT(0));\n\n/** Does polar->cartesian coordinates conversion.\n   Either of output components (magnitude or angle) is optional.\n   If magnitude is missing it is assumed to be all 1's */\nCVAPI(void)  cvPolarToCart( const CvArr* magnitude, const CvArr* angle,\n                            CvArr* x, CvArr* y,\n                            int angle_in_degrees CV_DEFAULT(0));\n\n/** Does powering: dst(idx) = src(idx)^power */\nCVAPI(void)  cvPow( const CvArr* src, CvArr* dst, double power );\n\n/** Does exponentiation: dst(idx) = exp(src(idx)).\n   Overflow is not handled yet. Underflow is handled.\n   Maximal relative error is ~7e-6 for single-precision input */\nCVAPI(void)  cvExp( const CvArr* src, CvArr* dst );\n\n/** Calculates natural logarithms: dst(idx) = log(abs(src(idx))).\n   Logarithm of 0 gives large negative number (~-700)\n   Maximal relative error is ~3e-7 for single-precision output\n*/\nCVAPI(void)  cvLog( const CvArr* src, CvArr* dst );\n\n/** Fast arctangent calculation */\nCVAPI(float) cvFastArctan( float y, float x );\n\n/** Fast cubic root calculation */\nCVAPI(float)  cvCbrt( float value );\n\n#define  CV_CHECK_RANGE    1\n#define  CV_CHECK_QUIET    2\n/** Checks array values for NaNs, Infs or simply for too large numbers\n   (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set,\n   no runtime errors are raised (function returns zero value in case of \"bad\" values).\n   Otherwise cvError is called */\nCVAPI(int)  cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0),\n                        double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0));\n#define cvCheckArray cvCheckArr\n
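\n/* Usage sketch (illustrative, not part of the original documentation): a quiet\n   range check that returns zero instead of raising an error on \"bad\" values:\n\n       if( !cvCheckArr( A, CV_CHECK_RANGE | CV_CHECK_QUIET, 0, 256 ) )\n           printf( \"A contains NaN/Inf or values outside [0, 256)\\n\" );\n*/\n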
\n#define CV_RAND_UNI      0\n#define CV_RAND_NORMAL   1\n\n/** @brief Fills an array with random numbers and updates the RNG state.\n\nThe function fills the destination array with uniformly or normally distributed random numbers.\n@param rng CvRNG state initialized by cvRNG\n@param arr The destination array\n@param dist_type Distribution type\n> -   **CV_RAND_UNI** uniform distribution\n> -   **CV_RAND_NORMAL** normal or Gaussian distribution\n@param param1 The first parameter of the distribution. In the case of a uniform distribution it is\nthe inclusive lower boundary of the random numbers range. In the case of a normal distribution it\nis the mean value of the random numbers.\n@param param2 The second parameter of the distribution. In the case of a uniform distribution it\nis the exclusive upper boundary of the random numbers range. In the case of a normal distribution\nit is the standard deviation of the random numbers.\n@sa randu, randn, RNG::fill.\n */\nCVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type,\n                      CvScalar param1, CvScalar param2 );\n\nCVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng,\n                           double iter_factor CV_DEFAULT(1.));\n\n#define CV_SORT_EVERY_ROW 0\n#define CV_SORT_EVERY_COLUMN 1\n#define CV_SORT_ASCENDING 0\n#define CV_SORT_DESCENDING 16\n\nCVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL),\n                    CvArr* idxmat CV_DEFAULT(NULL),\n                    int flags CV_DEFAULT(0));\n\n/** Finds real roots of a cubic equation */\nCVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots );\n\n/** Finds all real and complex roots of a polynomial equation */\nCVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2,\n      int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100));\n\n/****************************************************************************************\\\n*                                Matrix operations                                       *\n\\****************************************************************************************/\n\n/** @brief Calculates the cross product of two 3D vectors.\n\nThe function calculates the cross product of two 3D vectors:\n\\f[\\texttt{dst} =  \\texttt{src1} \\times \\texttt{src2}\\f]\nor:\n\\f[\\begin{array}{l} \\texttt{dst} _1 =  \\texttt{src1} _2  \\texttt{src2} _3 -  \\texttt{src1} _3  \\texttt{src2} _2 \\\\ \\texttt{dst} _2 =  \\texttt{src1} _3  \\texttt{src2} _1 -  \\texttt{src1} _1  \\texttt{src2} _3 \\\\ \\texttt{dst} _3 =  \\texttt{src1} _1  \\texttt{src2} _2 -  \\texttt{src1} _2  \\texttt{src2} _1 \\end{array}\\f]\n@param src1 The first source vector\n@param src2 The second source vector\n@param dst The destination vector\n */\nCVAPI(void)  cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst );\n\n/** Matrix transform: dst = A*B + C, C is optional */\n#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 )\n#define cvMatMul( src1, src2, dst )  cvMatMulAdd( (src1), (src2), NULL, (dst))\n\n#define CV_GEMM_A_T 1\n#define CV_GEMM_B_T 2\n#define CV_GEMM_C_T 4\n/** Extended matrix transform:\n   dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */\nCVAPI(void)  cvGEMM( const CvArr* src1, const CvArr* src2, double alpha,\n                     const CvArr* src3, double beta, CvArr* dst,\n                     int tABC CV_DEFAULT(0));\n#define cvMatMulAddEx cvGEMM\n\n/** Transforms each element of source array and stores\n   resultant vectors in destination array */\nCVAPI(void)  cvTransform( const CvArr* src, CvArr* dst,\n                          const CvMat* transmat,\n                          const CvMat* shiftvec CV_DEFAULT(NULL));\n#define cvMatMulAddS cvTransform\n\n/** Does perspective transform on every element of input array */\nCVAPI(void)  cvPerspectiveTransform( const CvArr* src, CvArr* dst,\n                                     const CvMat* mat );\n\n/** Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */\nCVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order,\n                             const CvArr* delta CV_DEFAULT(NULL),\n                             double scale CV_DEFAULT(1.) );\n
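\n/* Usage sketch (illustrative, not part of the original documentation): the GEMM-based\n   macros and cvGEMM itself cover the common matrix products:\n\n       cvMatMul( A, B, D );                            // D = A*B\n       cvMatMulAdd( A, B, C, D );                      // D = A*B + C\n       cvGEMM( A, B, 1., NULL, 0., D, CV_GEMM_B_T );   // D = A*B^T\n*/\n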
\n/** Transposes matrix. Square matrices can be transposed in-place */\nCVAPI(void)  cvTranspose( const CvArr* src, CvArr* dst );\n#define cvT cvTranspose\n\n/** Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */\nCVAPI(void)  cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) );\n\n/** Mirror array data around horizontal (flip=0),\n   vertical (flip=1) or both (flip=-1) axes:\n   cvFlip(src) flips images vertically and sequences horizontally (inplace) */\nCVAPI(void)  cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL),\n                     int flip_mode CV_DEFAULT(0));\n#define cvMirror cvFlip\n\n\n#define CV_SVD_MODIFY_A   1\n#define CV_SVD_U_T        2\n#define CV_SVD_V_T        4\n\n/** Performs Singular Value Decomposition of a matrix */\nCVAPI(void)   cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL),\n                     CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0));\n\n/** Performs Singular Value Back Substitution (solves A*X = B):\n   flags must be the same as in cvSVD */\nCVAPI(void)   cvSVBkSb( const CvArr* W, const CvArr* U,\n                        const CvArr* V, const CvArr* B,\n                        CvArr* X, int flags );\n\n#define CV_LU  0\n#define CV_SVD 1\n#define CV_SVD_SYM 2\n#define CV_CHOLESKY 3\n#define CV_QR  4\n#define CV_NORMAL 16\n\n/** Inverts matrix */\nCVAPI(double)  cvInvert( const CvArr* src, CvArr* dst,\n                         int method CV_DEFAULT(CV_LU));\n#define cvInv cvInvert\n\n/** Solves linear system (src1)*(dst) = (src2)\n   (returns 0 if src1 is singular and the CV_LU method is used) */\nCVAPI(int)  cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst,\n                     int method CV_DEFAULT(CV_LU));\n\n/** Calculates determinant of input matrix */\nCVAPI(double) cvDet( const CvArr* mat );\n\n/** Calculates trace of the matrix (sum of elements on the main diagonal) */\nCVAPI(CvScalar) cvTrace( const CvArr* mat );\n\n/** Finds eigenvalues and eigenvectors of a symmetric matrix */\nCVAPI(void)  cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals,\n                        double eps CV_DEFAULT(0),\n                        int lowindex CV_DEFAULT(-1),\n                        int highindex CV_DEFAULT(-1));\n\n///* Finds selected eigen values and vectors of a symmetric matrix */\n//CVAPI(void)  cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals,\n//                                int lowindex, int highindex );\n\n/** Makes an identity matrix (mat_ij = i == j) */\nCVAPI(void)  cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) );\n\n/** Fills matrix with given range of numbers */\nCVAPI(CvArr*)  cvRange( CvArr* mat, double start, double end );\n
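\n/* Usage sketch (illustrative, not part of the original documentation): solving a\n   2x2 linear system A*x = b with cvSolve:\n\n       float A_data[] = { 2.f, 1.f,\n                          1.f, 3.f };\n       float b_data[] = { 3.f, 5.f };\n       float x_data[2];\n       CvMat A = cvMat( 2, 2, CV_32F, A_data );\n       CvMat b = cvMat( 2, 1, CV_32F, b_data );\n       CvMat x = cvMat( 2, 1, CV_32F, x_data );\n       if( cvSolve( &A, &b, &x, CV_LU ) )\n       {\n           // x_data now holds the solution (0.8, 1.4)\n       }\n*/\n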
\n/**   @anchor core_c_CovarFlags\n@name Flags for cvCalcCovarMatrix\n@see cvCalcCovarMatrix\n  @{\n*/\n\n/** flag for cvCalcCovarMatrix, transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */\n#define CV_COVAR_SCRAMBLED 0\n\n/** flag for cvCalcCovarMatrix, [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */\n#define CV_COVAR_NORMAL    1\n\n/** flag for cvCalcCovarMatrix, do not calc average (i.e. mean vector) - use the input vector instead\n   (useful for calculating covariance matrix by parts) */\n#define CV_COVAR_USE_AVG   2\n\n/** flag for cvCalcCovarMatrix, scale the covariance matrix coefficients by the number of vectors */\n#define CV_COVAR_SCALE     4\n\n/** flag for cvCalcCovarMatrix, all the input vectors are stored in a single matrix, as its rows */\n#define CV_COVAR_ROWS      8\n\n/** flag for cvCalcCovarMatrix, all the input vectors are stored in a single matrix, as its columns */\n#define CV_COVAR_COLS     16\n\n/** @} */\n\n/** Calculates the covariance matrix for a set of vectors\n@see @ref core_c_CovarFlags \"flags\"\n*/\nCVAPI(void)  cvCalcCovarMatrix( const CvArr** vects, int count,\n                                CvArr* cov_mat, CvArr* avg, int flags );\n\n#define CV_PCA_DATA_AS_ROW 0\n#define CV_PCA_DATA_AS_COL 1\n#define CV_PCA_USE_AVG 2\nCVAPI(void)  cvCalcPCA( const CvArr* data, CvArr* mean,\n                        CvArr* eigenvals, CvArr* eigenvects, int flags );\n\nCVAPI(void)  cvProjectPCA( const CvArr* data, const CvArr* mean,\n                           const CvArr* eigenvects, CvArr* result );\n\nCVAPI(void)  cvBackProjectPCA( const CvArr* proj, const CvArr* mean,\n                               const CvArr* eigenvects, CvArr* result );\n\n/** Calculates Mahalanobis (weighted) distance */\nCVAPI(double)  cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat );\n#define cvMahalonobis  cvMahalanobis\n\n/****************************************************************************************\\\n*                                    Array Statistics                                    *\n\\****************************************************************************************/\n\n/** Finds sum of array elements */\nCVAPI(CvScalar)  cvSum( const CvArr* arr );\n\n/** Calculates number of non-zero pixels */\nCVAPI(int)  cvCountNonZero( const CvArr* arr );\n\n/** Calculates mean value of array elements */\nCVAPI(CvScalar)  cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) );\n\n/** Calculates mean and standard deviation of pixel values */\nCVAPI(void)  cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev,\n                       const CvArr* mask CV_DEFAULT(NULL) );\n\n/** Finds global minimum, maximum and their positions */\nCVAPI(void)  cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val,\n                          CvPoint* min_loc CV_DEFAULT(NULL),\n                          CvPoint* max_loc CV_DEFAULT(NULL),\n                          const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @anchor core_c_NormFlags\n  @name Flags for cvNorm and cvNormalize\n  @{\n*/\n#define CV_C            1\n#define CV_L1           2\n#define CV_L2           4\n#define CV_NORM_MASK    7\n#define CV_RELATIVE     8\n#define CV_DIFF         16\n#define CV_MINMAX       32\n\n#define CV_DIFF_C       (CV_DIFF | CV_C)\n#define CV_DIFF_L1      (CV_DIFF | CV_L1)\n#define CV_DIFF_L2      (CV_DIFF | CV_L2)\n#define CV_RELATIVE_C   (CV_RELATIVE | CV_C)\n#define CV_RELATIVE_L1  (CV_RELATIVE | CV_L1)\n#define CV_RELATIVE_L2  (CV_RELATIVE | CV_L2)\n/** @} */\n\n/** Finds norm, difference norm or relative difference norm for an array (or two arrays)\n@see @ref core_c_NormFlags \"flags\"\n*/\nCVAPI(double)  cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL),\n                       int norm_type CV_DEFAULT(CV_L2),\n                       const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @see @ref core_c_NormFlags \"flags\" */\nCVAPI(void)  cvNormalize( 
const CvArr* src, CvArr* dst,\n                          double a CV_DEFAULT(1.), double b CV_DEFAULT(0.),\n                          int norm_type CV_DEFAULT(CV_L2),\n                          const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @anchor core_c_ReduceFlags\n  @name Flags for cvReduce\n  @{\n*/\n#define CV_REDUCE_SUM 0\n#define CV_REDUCE_AVG 1\n#define CV_REDUCE_MAX 2\n#define CV_REDUCE_MIN 3\n/** @} */\n\n/** @see @ref core_c_ReduceFlags \"flags\" */\nCVAPI(void)  cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1),\n                       int op CV_DEFAULT(CV_REDUCE_SUM) );\n\n/****************************************************************************************\\\n*                      Discrete Linear Transforms and Related Functions                  *\n\\****************************************************************************************/\n\n/** @anchor core_c_DftFlags\n  @name Flags for cvDFT, cvDCT and cvMulSpectrums\n  @{\n  */\n#define CV_DXT_FORWARD  0\n#define CV_DXT_INVERSE  1\n#define CV_DXT_SCALE    2 /**< divide result by size of array */\n#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE)\n#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE\n#define CV_DXT_ROWS     4 /**< transform each row individually */\n#define CV_DXT_MUL_CONJ 8 /**< conjugate the second argument of cvMulSpectrums */\n/** @} */\n\n/** Discrete Fourier Transform:\n    complex->complex,\n    real->ccs (forward),\n    ccs->real (inverse)\n@see core_c_DftFlags \"flags\"\n*/\nCVAPI(void)  cvDFT( const CvArr* src, CvArr* dst, int flags,\n                    int nonzero_rows CV_DEFAULT(0) );\n#define cvFFT cvDFT\n\n/** Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y))\n@see core_c_DftFlags \"flags\"\n*/\nCVAPI(void)  cvMulSpectrums( const CvArr* src1, const CvArr* src2,\n                             CvArr* dst, int flags );\n\n/** Finds optimal DFT vector size >= size0 */\nCVAPI(int)  cvGetOptimalDFTSize( int size0 );\n\n/** Discrete Cosine Transform\n@see core_c_DftFlags \"flags\"\n*/\nCVAPI(void)  cvDCT( const CvArr* src, CvArr* dst, int flags );\n\n/****************************************************************************************\\\n*                              Dynamic data structures                                   *\n\\****************************************************************************************/\n\n/** Calculates length of sequence slice (with support of negative indices). */\nCVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq );\n\n\n/** Creates new memory storage.\n   block_size == 0 means that default,\n   somewhat optimal size, is used (currently, it is 64K) */\nCVAPI(CvMemStorage*)  cvCreateMemStorage( int block_size CV_DEFAULT(0));\n\n\n/** Creates a memory storage that will borrow memory blocks from parent storage */\nCVAPI(CvMemStorage*)  cvCreateChildMemStorage( CvMemStorage* parent );\n\n\n/** Releases memory storage. All the children of a parent must be released before\n   the parent. A child storage returns all the blocks to parent when it is released */\nCVAPI(void)  cvReleaseMemStorage( CvMemStorage** storage );\n\n\n/** Clears memory storage. This is the only way(!!!) 
(besides cvRestoreMemStoragePos)\n   to reuse memory allocated for the storage - cvClearSeq, cvClearSet, ...\n   do not free any memory.\n   A child storage returns all the blocks to the parent when it is cleared */\nCVAPI(void)  cvClearMemStorage( CvMemStorage* storage );\n\n/** Remember a storage \"free memory\" position */\nCVAPI(void)  cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos );\n\n/** Restore a storage \"free memory\" position */\nCVAPI(void)  cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos );\n\n/** Allocates continuous buffer of the specified size in the storage */\nCVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size );\n\n/** Allocates string in memory storage */\nCVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr,\n                                         int len CV_DEFAULT(-1) );\n\n/** Creates new empty sequence that will reside in the specified storage */\nCVAPI(CvSeq*)  cvCreateSeq( int seq_flags, size_t header_size,\n                            size_t elem_size, CvMemStorage* storage );\n\n/** Changes default size (granularity) of sequence blocks.\n   The default size is ~1Kbyte */\nCVAPI(void)  cvSetSeqBlockSize( CvSeq* seq, int delta_elems );\n\n\n/** Adds new element to the end of sequence. Returns pointer to the element */\nCVAPI(schar*)  cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL));\n\n\n/** Adds new element to the beginning of sequence. Returns pointer to it */\nCVAPI(schar*)  cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL));\n\n\n/** Removes the last element from sequence and optionally saves it */\nCVAPI(void)  cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL));\n\n\n/** Removes the first element from sequence and optionally saves it */\nCVAPI(void)  cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL));\n\n\n#define CV_FRONT 1\n#define CV_BACK 0\n/** Adds several new elements to the end of sequence */\nCVAPI(void)  cvSeqPushMulti( CvSeq* seq, const void* elements,\n                             int count, int in_front CV_DEFAULT(0) );\n\n/** Removes several elements from the end of sequence and optionally saves them */\nCVAPI(void)  cvSeqPopMulti( CvSeq* seq, void* elements,\n                            int count, int in_front CV_DEFAULT(0) );\n\n/** Inserts a new element in the middle of sequence.\n   cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */\nCVAPI(schar*)  cvSeqInsert( CvSeq* seq, int before_index,\n                            const void* element CV_DEFAULT(NULL));\n\n/** Removes specified sequence element */\nCVAPI(void)  cvSeqRemove( CvSeq* seq, int index );\n\n\n/** Removes all the elements from the sequence. The freed memory\n   can be reused later only by the same sequence unless cvClearMemStorage\n   or cvRestoreMemStoragePos is called */\nCVAPI(void)  cvClearSeq( CvSeq* seq );\n\n\n/** Retrieves pointer to specified sequence element.\n   Negative indices are supported and mean counting from the end\n   (e.g. -1 means the last sequence element) */\nCVAPI(schar*)  cvGetSeqElem( const CvSeq* seq, int index );\n\n/** Calculates index of the specified sequence element.\n   Returns -1 if element does not belong to the sequence */\nCVAPI(int)  cvSeqElemIdx( const CvSeq* seq, const void* element,\n                         CvSeqBlock** block CV_DEFAULT(NULL) );\n
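\n/* Usage sketch (illustrative, not part of the original documentation): a growable\n   sequence of CvPoint living in a memory storage:\n\n       CvMemStorage* storage = cvCreateMemStorage(0);\n       CvSeq* seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );\n       CvPoint pt = cvPoint( 10, 20 );\n       cvSeqPush( seq, &pt );\n       CvPoint* first = (CvPoint*)cvGetSeqElem( seq, 0 );  // first->x == 10\n       cvReleaseMemStorage( &storage );  // releases the sequence as well\n*/\n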
\n/** Initializes sequence writer. The new elements will be added to the end of sequence */\nCVAPI(void)  cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer );\n\n\n/** Combination of cvCreateSeq and cvStartAppendToSeq */\nCVAPI(void)  cvStartWriteSeq( int seq_flags, int header_size,\n                              int elem_size, CvMemStorage* storage,\n                              CvSeqWriter* writer );\n\n/** Closes sequence writer, updates sequence header and returns pointer\n   to the resultant sequence\n   (which may be useful if the sequence was created using cvStartWriteSeq)\n*/\nCVAPI(CvSeq*)  cvEndWriteSeq( CvSeqWriter* writer );\n\n\n/** Updates sequence header. May be useful to get access to some of previously\n   written elements via cvGetSeqElem or sequence reader */\nCVAPI(void)   cvFlushSeqWriter( CvSeqWriter* writer );\n\n\n/** Initializes sequence reader.\n   The sequence can be read in forward or backward direction */\nCVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader,\n                           int reverse CV_DEFAULT(0) );\n\n\n/** Returns current sequence reader position (currently observed sequence element) */\nCVAPI(int)  cvGetSeqReaderPos( CvSeqReader* reader );\n\n\n/** Changes sequence reader position. It may seek to an absolute position or\n   relative to the current position */\nCVAPI(void)   cvSetSeqReaderPos( CvSeqReader* reader, int index,\n                                 int is_relative CV_DEFAULT(0));\n\n/** Copies sequence content to a continuous piece of memory */\nCVAPI(void*)  cvCvtSeqToArray( const CvSeq* seq, void* elements,\n                               CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) );\n\n/** Creates sequence header for array.\n   After that all the operations on sequences that do not alter the content\n   can be applied to the resultant sequence */\nCVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size,\n                                       int elem_size, void* elements, int total,\n                                       CvSeq* seq, CvSeqBlock* block );\n\n/** Extracts sequence slice (with or without copying sequence elements) */\nCVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice,\n                         CvMemStorage* storage CV_DEFAULT(NULL),\n                         int copy_data CV_DEFAULT(0));\n\nCV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL))\n{\n    return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 );\n}\n\n/** Removes sequence slice */\nCVAPI(void)  cvSeqRemoveSlice( CvSeq* seq, CvSlice slice );\n\n/** Inserts a sequence or array into another sequence */\nCVAPI(void)  cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );\n\n/** a < b ? -1 : a > b ? 
1 : 0 */\ntypedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata );\n\n/** Sorts sequence in-place given element comparison function */\nCVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) );\n\n/** Finds element in a [sorted] sequence */\nCVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func,\n                           int is_sorted, int* elem_idx,\n                           void* userdata CV_DEFAULT(NULL) );\n\n/** Reverses order of sequence elements in-place */\nCVAPI(void) cvSeqInvert( CvSeq* seq );\n\n/** Splits sequence into one or more equivalence classes using the specified criteria */\nCVAPI(int)  cvSeqPartition( const CvSeq* seq, CvMemStorage* storage,\n                            CvSeq** labels, CvCmpFunc is_equal, void* userdata );\n\n/************ Internal sequence functions ************/\nCVAPI(void)  cvChangeSeqBlock( void* reader, int direction );\nCVAPI(void)  cvCreateSeqBlock( CvSeqWriter* writer );\n\n\n/** Creates a new set */\nCVAPI(CvSet*)  cvCreateSet( int set_flags, int header_size,\n                            int elem_size, CvMemStorage* storage );\n\n/** Adds new element to the set and returns pointer to it */\nCVAPI(int)  cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL),\n                      CvSetElem** inserted_elem CV_DEFAULT(NULL) );\n\n/** Fast variant of cvSetAdd */\nCV_INLINE  CvSetElem* cvSetNew( CvSet* set_header )\n{\n    CvSetElem* elem = set_header->free_elems;\n    if( elem )\n    {\n        set_header->free_elems = elem->next_free;\n        elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK;\n        set_header->active_count++;\n    }\n    else\n        cvSetAdd( set_header, NULL, &elem );\n    return elem;\n}\n\n/** Removes set element given its pointer */\nCV_INLINE  void cvSetRemoveByPtr( CvSet* set_header, void* elem )\n{\n    CvSetElem* _elem = (CvSetElem*)elem;\n    assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ );\n    _elem->next_free = set_header->free_elems;\n    _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG;\n    set_header->free_elems = _elem;\n    set_header->active_count--;\n}\n\n/** Removes element from the set by its index  */\nCVAPI(void)   cvSetRemove( CvSet* set_header, int index );\n\n/** Returns a set element by index. If the element doesn't belong to the set,\n   NULL is returned */\nCV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx )\n{\n    CvSetElem* elem = (CvSetElem*)(void *)cvGetSeqElem( (CvSeq*)set_header, idx );\n    return elem && CV_IS_SET_ELEM( elem ) ? 
elem : 0;\n}\n\n/** Removes all the elements from the set */\nCVAPI(void)  cvClearSet( CvSet* set_header );\n\n/** Creates new graph */\nCVAPI(CvGraph*)  cvCreateGraph( int graph_flags, int header_size,\n                                int vtx_size, int edge_size,\n                                CvMemStorage* storage );\n\n/** Adds new vertex to the graph */\nCVAPI(int)  cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL),\n                           CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) );\n\n\n/** Removes vertex from the graph together with all incident edges */\nCVAPI(int)  cvGraphRemoveVtx( CvGraph* graph, int index );\nCVAPI(int)  cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx );\n\n\n/** Link two vertices specified by indices or pointers if they\n   are not connected, or return a pointer to the already existing edge\n   connecting the vertices.\n   Functions return 1 if a new edge was created, 0 otherwise */\nCVAPI(int)  cvGraphAddEdge( CvGraph* graph,\n                            int start_idx, int end_idx,\n                            const CvGraphEdge* edge CV_DEFAULT(NULL),\n                            CvGraphEdge** inserted_edge CV_DEFAULT(NULL) );\n\nCVAPI(int)  cvGraphAddEdgeByPtr( CvGraph* graph,\n                               CvGraphVtx* start_vtx, CvGraphVtx* end_vtx,\n                               const CvGraphEdge* edge CV_DEFAULT(NULL),\n                               CvGraphEdge** inserted_edge CV_DEFAULT(NULL) );\n\n/** Remove edge connecting two vertices */\nCVAPI(void)  cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx );\nCVAPI(void)  cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx,\n                                     CvGraphVtx* end_vtx );\n\n/** Find edge connecting two vertices */\nCVAPI(CvGraphEdge*)  cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx );\nCVAPI(CvGraphEdge*)  cvFindGraphEdgeByPtr( const CvGraph* graph,\n                                           const CvGraphVtx* start_vtx,\n                                           const CvGraphVtx* end_vtx );\n#define cvGraphFindEdge cvFindGraphEdge\n#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr\n\n/** Remove all vertices and edges from the graph */\nCVAPI(void)  cvClearGraph( CvGraph* graph );\n\n\n/** Count number of edges incident to the vertex */\nCVAPI(int)  cvGraphVtxDegree( const CvGraph* graph, int vtx_idx );\nCVAPI(int)  cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx );\n\n\n/** Retrieves graph vertex by given index */\n#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx))\n\n/** Retrieves index of a graph vertex given its pointer */\n#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK)\n\n/** Retrieves index of a graph edge given its pointer */\n#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK)\n\n#define cvGraphGetVtxCount( graph ) ((graph)->active_count)\n#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count)\n\n#define  CV_GRAPH_VERTEX        1\n#define  CV_GRAPH_TREE_EDGE     2\n#define  CV_GRAPH_BACK_EDGE     4\n#define  CV_GRAPH_FORWARD_EDGE  8\n#define  CV_GRAPH_CROSS_EDGE    16\n#define  CV_GRAPH_ANY_EDGE      30\n#define  CV_GRAPH_NEW_TREE      32\n#define  CV_GRAPH_BACKTRACKING  64\n#define  CV_GRAPH_OVER          -1\n\n#define  CV_GRAPH_ALL_ITEMS    -1\n\n/** flags for graph vertices and edges */\n#define  CV_GRAPH_ITEM_VISITED_FLAG  (1 << 30)\n#define  CV_IS_GRAPH_VERTEX_VISITED(vtx) \\\n    
(((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG)\n#define  CV_IS_GRAPH_EDGE_VISITED(edge) \\\n    (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG)\n#define  CV_GRAPH_SEARCH_TREE_NODE_FLAG   (1 << 29)\n#define  CV_GRAPH_FORWARD_EDGE_FLAG       (1 << 28)\n\ntypedef struct CvGraphScanner\n{\n    CvGraphVtx* vtx;       /* current graph vertex (or current edge origin) */\n    CvGraphVtx* dst;       /* current graph edge destination vertex */\n    CvGraphEdge* edge;     /* current edge */\n\n    CvGraph* graph;        /* the graph */\n    CvSeq*   stack;        /* the graph vertex stack */\n    int      index;        /* the lower bound of certainly visited vertices */\n    int      mask;         /* event mask */\n}\nCvGraphScanner;\n\n/** Creates new graph scanner. */\nCVAPI(CvGraphScanner*)  cvCreateGraphScanner( CvGraph* graph,\n                                             CvGraphVtx* vtx CV_DEFAULT(NULL),\n                                             int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS));\n\n/** Releases graph scanner. */\nCVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner );\n\n/** Get next graph element */\nCVAPI(int)  cvNextGraphItem( CvGraphScanner* scanner );\n\n/** Creates a copy of graph */\nCVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage );\n\n\n/** Does look-up transformation. Elements of the source array\n   (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */\nCVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut );\n\n\n/******************* Iteration through the sequence tree *****************/\ntypedef struct CvTreeNodeIterator\n{\n    const void* node;\n    int level;\n    int max_level;\n}\nCvTreeNodeIterator;\n\nCVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator,\n                                   const void* first, int max_level );\nCVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator );\nCVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator );\n\n/** Inserts sequence into tree with specified \"parent\" sequence.\n   If parent is equal to frame (e.g. the most external contour),\n   then added contour will have null pointer to parent. */\nCVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame );\n\n/** Removes contour from tree (together with the contour children). */\nCVAPI(void) cvRemoveNodeFromTree( void* node, void* frame );\n\n/** Gathers pointers to all the sequences,\n   accessible from the `first`, to the single sequence */\nCVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size,\n                              CvMemStorage* storage );\n\n/** The function implements the K-means algorithm for clustering an array of sample\n   vectors in a specified number of classes */\n#define CV_KMEANS_USE_INITIAL_LABELS    1\nCVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels,\n                      CvTermCriteria termcrit, int attempts CV_DEFAULT(1),\n                      CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0),\n                      CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) );\n\n/****************************************************************************************\\\n*                                    System functions                                    *\n\\****************************************************************************************/\n\n/** Loads optimized functions from IPP, MKL etc. 
or switches back to pure C code */\nCVAPI(int)  cvUseOptimized( int on_off );\n\ntypedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader)\n                            (int,int,int,char*,char*,int,int,int,int,int,\n                            IplROI*,IplImage*,void*,IplTileInfo*);\ntypedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int);\ntypedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int);\ntypedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int);\ntypedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*);\n\n/** @brief Makes OpenCV use IPL functions for allocating IplImage and IplROI structures.\n\nNormally, the function is not called directly. Instead, a simple macro\nCV_TURN_ON_IPL_COMPATIBILITY() is used that calls cvSetIPLAllocators and passes there pointers\nto IPL allocation functions. :\n@code\n    ...\n    CV_TURN_ON_IPL_COMPATIBILITY()\n    ...\n@endcode\n@param create_header pointer to a function, creating IPL image header.\n@param allocate_data pointer to a function, allocating IPL image data.\n@param deallocate pointer to a function, deallocating IPL image.\n@param create_roi pointer to a function, creating IPL image ROI (i.e. Region of Interest).\n@param clone_image pointer to a function, cloning an IPL image.\n */\nCVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header,\n                               Cv_iplAllocateImageData allocate_data,\n                               Cv_iplDeallocate deallocate,\n                               Cv_iplCreateROI create_roi,\n                               Cv_iplCloneImage clone_image );\n\n#define CV_TURN_ON_IPL_COMPATIBILITY()                                  \\\n    cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage,         \\\n                        iplDeallocate, iplCreateROI, iplCloneImage )\n\n/****************************************************************************************\\\n*                                    Data Persistence                                    *\n\\****************************************************************************************/\n\n/********************************** High-level functions ********************************/\n\n/** @brief Opens file storage for reading or writing data.\n\nThe function opens file storage for reading or writing data. In the latter case, a new file is\ncreated or an existing file is rewritten. The type of the read or written file is determined by the\nfilename extension: .xml for XML and .yml or .yaml for YAML. The function returns a pointer to the\nCvFileStorage structure. If the file cannot be opened then the function returns NULL.\n@param filename Name of the file associated with the storage\n@param memstorage Memory storage used for temporary data and for\n:   storing dynamic structures, such as CvSeq or CvGraph . 
If it is NULL, a temporary memory\n    storage is created and used.\n@param flags Can be one of the following:\n> -   **CV_STORAGE_READ** the storage is open for reading\n> -   **CV_STORAGE_WRITE** the storage is open for writing\n@param encoding\n */\nCVAPI(CvFileStorage*)  cvOpenFileStorage( const char* filename, CvMemStorage* memstorage,\n                                          int flags, const char* encoding CV_DEFAULT(NULL) );\n\n/** @brief Releases file storage.\n\nThe function closes the file associated with the storage and releases all the temporary structures.\nIt must be called after all I/O operations with the storage are finished.\n@param fs Double pointer to the released file storage\n */\nCVAPI(void) cvReleaseFileStorage( CvFileStorage** fs );\n\n/** returns attribute value or 0 (NULL) if there is no such attribute */\nCVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name );\n\n/** @brief Starts writing a new structure.\n\nThe function starts writing a compound structure (collection) that can be a sequence or a map. After\nall the structure fields, which can be scalars or structures, are written, cvEndWriteStruct should\nbe called. The function can be used to group some objects or to implement the write function for\nsome user object (see CvTypeInfo).\n@param fs File storage\n@param name Name of the written structure. The structure can be accessed by this name when the\nstorage is read.\n@param struct_flags A combination of the following values:\n-   **CV_NODE_SEQ** the written structure is a sequence (see discussion of CvFileStorage),\n    that is, its elements do not have a name.\n-   **CV_NODE_MAP** the written structure is a map (see discussion of CvFileStorage), that\n    is, all its elements have names.\nOne and only one of the two above flags must be specified.\n-   **CV_NODE_FLOW** the optional flag that makes sense only for YAML streams. It means that\n     the structure is written as a flow (not as a block), which is more compact. It is\n     recommended to use this flag for structures or arrays whose elements are all scalars.\n@param type_name Optional parameter - the object type name. In\n    case of XML it is written as a type_id attribute of the structure opening tag. In the case of\n    YAML it is written after a colon following the structure name (see the example in\n    CvFileStorage description). Mainly it is used with user objects. When the storage is read, the\n    encoded type name is used to determine the object type (see CvTypeInfo and cvFindType).\n@param attributes This parameter is not used in the current implementation\n */\nCVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name,\n                                int struct_flags, const char* type_name CV_DEFAULT(NULL),\n                                CvAttrList attributes CV_DEFAULT(cvAttrList()));\n\n/** @brief Finishes writing to a file node collection.\n@param fs File storage\n@sa cvStartWriteStruct.\n */\nCVAPI(void) cvEndWriteStruct( CvFileStorage* fs );\n
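\n/* Usage sketch (illustrative, not part of the original documentation): writing a\n   small map to a YAML file with the functions declared above and below:\n\n       CvFileStorage* fs = cvOpenFileStorage( \"cfg.yml\", 0, CV_STORAGE_WRITE );\n       cvStartWriteStruct( fs, \"params\", CV_NODE_MAP, NULL, cvAttrList(0,0) );\n       cvWriteInt( fs, \"iterations\", 100 );\n       cvWriteReal( fs, \"epsilon\", 1e-6 );\n       cvEndWriteStruct( fs );\n       cvReleaseFileStorage( &fs );\n*/\n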
\n/** @brief Writes an integer value.\n\nThe function writes a single integer value (with or without a name) to the file storage.\n@param fs File storage\n@param name Name of the written value. Should be NULL if and only if the parent structure is a\nsequence.\n@param value The written value\n */\nCVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value );\n\n/** @brief Writes a floating-point value.\n\nThe function writes a single floating-point value (with or without a name) to file storage. Special\nvalues are encoded as follows: NaN (Not A Number) as .NaN, infinity as +.Inf or -.Inf.\n\nThe following example shows how to use the low-level writing functions to store custom structures,\nsuch as termination criteria, without registering a new type:\n@code\n    void write_termcriteria( CvFileStorage* fs, const char* struct_name,\n                             CvTermCriteria* termcrit )\n    {\n        cvStartWriteStruct( fs, struct_name, CV_NODE_MAP, NULL, cvAttrList(0,0));\n        cvWriteComment( fs, \"termination criteria\", 1 ); // just a description\n        if( termcrit->type & CV_TERMCRIT_ITER )\n            cvWriteInt( fs, \"max_iterations\", termcrit->max_iter );\n        if( termcrit->type & CV_TERMCRIT_EPS )\n            cvWriteReal( fs, \"accuracy\", termcrit->epsilon );\n        cvEndWriteStruct( fs );\n    }\n@endcode\n@param fs File storage\n@param name Name of the written value. Should be NULL if and only if the parent structure is a\nsequence.\n@param value The written value\n*/\nCVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value );\n\n/** @brief Writes a text string.\n\nThe function writes a text string to file storage.\n@param fs File storage\n@param name Name of the written string. Should be NULL if and only if the parent structure is a\nsequence.\n@param str The written text string\n@param quote If non-zero, the written string is put in quotes, regardless of whether they are\nrequired. Otherwise, if the flag is zero, quotes are used only when they are required (e.g. when\nthe string starts with a digit or contains spaces).\n */\nCVAPI(void) cvWriteString( CvFileStorage* fs, const char* name,\n                           const char* str, int quote CV_DEFAULT(0) );\n\n/** @brief Writes a comment.\n\nThe function writes a comment into file storage. The comments are skipped when the storage is read.\n@param fs File storage\n@param comment The written comment, single-line or multi-line\n@param eol_comment If non-zero, the function tries to put the comment at the end of current line.\nIf the flag is zero, if the comment is multi-line, or if it does not fit at the end of the current\nline, the comment starts a new line.\n */\nCVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment,\n                            int eol_comment );\n\n/** @brief Writes an object to file storage.\n\nThe function writes an object to file storage. First, the appropriate type info is found using\ncvTypeOf. Then, the write method associated with the type info is called.\n\nAttributes are used to customize the writing procedure. 
The standard types support the following\nattributes (all the dt attributes have the same format as in cvWriteRawData):\n\n-# CvSeq\n    -   **header_dt** description of user fields of the sequence header that follow CvSeq, or\n        CvChain (if the sequence is a Freeman chain) or CvContour (if the sequence is a contour or\n        point sequence)\n    -   **dt** description of the sequence elements.\n    -   **recursive** if the attribute is present and is not equal to \"0\" or \"false\", the whole\n        tree of sequences (contours) is stored.\n-# CvGraph\n    -   **header_dt** description of user fields of the graph header that follows CvGraph;\n    -   **vertex_dt** description of user fields of graph vertices\n    -   **edge_dt** description of user fields of graph edges (note that the edge weight is\n        always written, so there is no need to specify it explicitly)\n\nBelow is the code that creates the YAML file shown in the CvFileStorage description:\n@code\n    #include \"cxcore.h\"\n\n    int main( int argc, char** argv )\n    {\n        CvMat* mat = cvCreateMat( 3, 3, CV_32F );\n        CvFileStorage* fs = cvOpenFileStorage( \"example.yml\", 0, CV_STORAGE_WRITE );\n\n        cvSetIdentity( mat );\n        cvWrite( fs, \"A\", mat, cvAttrList(0,0) );\n\n        cvReleaseFileStorage( &fs );\n        cvReleaseMat( &mat );\n        return 0;\n    }\n@endcode\n@param fs File storage\n@param name Name of the written object. Should be NULL if and only if the parent structure is a\nsequence.\n@param ptr Pointer to the object\n@param attributes The attributes of the object. They are specific for each particular type (see\nthe discussion below).\n */\nCVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr,\n                         CvAttrList attributes CV_DEFAULT(cvAttrList()));\n\n/** @brief Starts the next stream.\n\nThe function finishes the currently written stream and starts the next stream. In the case of XML\nthe file with multiple streams looks like this:\n@code{.xml}\n    <opencv_storage>\n    <!-- stream #1 data -->\n    </opencv_storage>\n    <opencv_storage>\n    <!-- stream #2 data -->\n    </opencv_storage>\n    ...\n@endcode\nThe YAML file will look like this:\n@code{.yaml}\n    %YAML:1.0\n    # stream #1 data\n    ...\n    ---\n    # stream #2 data\n@endcode\nThis is useful for concatenating files or for resuming the writing process.\n@param fs File storage\n */\nCVAPI(void) cvStartNextStream( CvFileStorage* fs );\n\n/** @brief Writes multiple numbers.\n\nThe function writes an array, whose elements consist of single or multiple numbers. The function\ncall can be replaced with a loop containing a few cvWriteInt and cvWriteReal calls, but a single\ncall is more efficient. Note that because none of the elements have a name, they should be written\nto a sequence rather than a map.\n@param fs File storage\n@param src Pointer to the written array\n@param len Number of the array elements to write\n@param dt Specification of each array element, see @ref format_spec \"format specification\"\n */\nCVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src,\n                                int len, const char* dt );\n\n/** @brief Returns a unique pointer for a given name.\n\nThe function returns a unique pointer for each particular file node name. 
This pointer can then be\npassed to the cvGetFileNode function that is faster than cvGetFileNodeByName because it compares\ntext strings by comparing pointers rather than the strings' content.\n\nConsider the following example where an array of points is encoded as a sequence of 2-entry maps:\n@code\n    points:\n      - { x: 10, y: 10 }\n      - { x: 20, y: 20 }\n      - { x: 30, y: 30 }\n      # ...\n@endcode\nThen, it is possible to get hashed \"x\" and \"y\" pointers to speed up decoding of the points:\n@code\n    #include \"cxcore.h\"\n\n    int main( int argc, char** argv )\n    {\n        CvFileStorage* fs = cvOpenFileStorage( \"points.yml\", 0, CV_STORAGE_READ );\n        CvStringHashNode* x_key = cvGetHashedKey( fs, \"x\", -1, 1 );\n        CvStringHashNode* y_key = cvGetHashedKey( fs, \"y\", -1, 1 );\n        CvFileNode* points = cvGetFileNodeByName( fs, 0, \"points\" );\n\n        if( points && CV_NODE_IS_SEQ(points->tag) )\n        {\n            CvSeq* seq = points->data.seq;\n            int i, total = seq->total;\n            CvSeqReader reader;\n            cvStartReadSeq( seq, &reader, 0 );\n            for( i = 0; i < total; i++ )\n            {\n                CvFileNode* pt = (CvFileNode*)reader.ptr;\n    #if 1 // faster variant\n                CvFileNode* xnode = cvGetFileNode( fs, pt, x_key, 0 );\n                CvFileNode* ynode = cvGetFileNode( fs, pt, y_key, 0 );\n                assert( xnode && CV_NODE_IS_INT(xnode->tag) &&\n                        ynode && CV_NODE_IS_INT(ynode->tag));\n                int x = xnode->data.i; // or x = cvReadInt( xnode, 0 );\n                int y = ynode->data.i; // or y = cvReadInt( ynode, 0 );\n    #elif 1 // slower variant; does not use x_key & y_key\n                CvFileNode* xnode = cvGetFileNodeByName( fs, pt, \"x\" );\n                CvFileNode* ynode = cvGetFileNodeByName( fs, pt, \"y\" );\n                assert( xnode && CV_NODE_IS_INT(xnode->tag) &&\n                        ynode && CV_NODE_IS_INT(ynode->tag));\n                int x = xnode->data.i; // or x = cvReadInt( xnode, 0 );\n                int y = ynode->data.i; // or y = cvReadInt( ynode, 0 );\n    #else // the slowest yet the easiest to use variant\n                int x = cvReadIntByName( fs, pt, \"x\", 0 );\n                int y = cvReadIntByName( fs, pt, \"y\", 0 );\n    #endif\n                CV_NEXT_SEQ_ELEM( seq->elem_size, reader );\n                printf( \"(%d, %d)\\n\", x, y );\n            }\n        }\n        cvReleaseFileStorage( &fs );\n        return 0;\n    }\n@endcode\nPlease note that whatever method of accessing a map you are using, it is still much slower than\nusing plain sequences; for example, in the above example, it is more efficient to encode the points\nas pairs of integers in a single numeric sequence.\n@param fs File storage\n@param name Literal node name\n@param len Length of the name (if it is known a priori), or -1 if it needs to be calculated\n@param create_missing Flag that specifies whether an absent key should be added into the hash table\n*/\nCVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name,\n                                        int len CV_DEFAULT(-1),\n                                        int create_missing CV_DEFAULT(0));\n\n/** @brief Retrieves one of the top-level nodes of the file storage.\n\nThe function returns one of the top-level file nodes. The top-level nodes do not have a name, they\ncorrespond to the streams that are stored one after another in the file storage. 
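For example, all the streams of an opened storage can be visited like this (a minimal sketch; fs is an already opened storage and process_stream is a hypothetical per-stream handler):\n@code\n    CvFileNode* root;\n    int k;\n    for( k = 0; (root = cvGetRootFileNode( fs, k )) != 0; k++ )\n        process_stream( fs, root ); // hypothetical callback\n@endcode\n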
If the index is out\nof range, the function returns a NULL pointer, so all the top-level nodes can be iterated by\nsubsequent calls to the function with stream_index=0,1,..., until the NULL pointer is returned.\nThis function can be used as a base for recursive traversal of the file storage.\n@param fs File storage\n@param stream_index Zero-based index of the stream. See cvStartNextStream . In most cases,\nthere is only one stream in the file; however, there can be several.\n */\nCVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs,\n                                     int stream_index CV_DEFAULT(0) );\n\n/** @brief Finds a node in a map or file storage.\n\nThe function finds a file node. It is a faster version of cvGetFileNodeByName (see\ncvGetHashedKey discussion). Also, the function can insert a new node, if it is not in the map yet.\n@param fs File storage\n@param map The parent map. If it is NULL, the function searches a top-level node. If both map and\nkey are NULLs, the function returns the root file node - a map that contains top-level nodes.\n@param key Unique pointer to the node name, retrieved with cvGetHashedKey\n@param create_missing Flag that specifies whether an absent node should be added to the map\n */\nCVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map,\n                                 const CvStringHashNode* key,\n                                 int create_missing CV_DEFAULT(0) );\n\n/** @brief Finds a node in a map or file storage.\n\nThe function finds a file node by name. The node is searched either in map or, if the pointer is\nNULL, among the top-level file storage nodes. Using this function for maps and cvGetSeqElem (or\nsequence reader) for sequences, it is possible to navigate through the file storage. To speed up\nmultiple queries for a certain key (e.g., in the case of an array of structures) one may use a\ncombination of cvGetHashedKey and cvGetFileNode.\n@param fs File storage\n@param map The parent map. If it is NULL, the function searches in all the top-level nodes\n(streams), starting with the first one.\n@param name The file node name\n */\nCVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs,\n                                       const CvFileNode* map,\n                                       const char* name );\n\n/** @brief Retrieves an integer value from a file node.\n\nThe function returns an integer that is represented by the file node. If the file node is NULL, the\ndefault_value is returned (thus, it is convenient to call the function right after cvGetFileNode\nwithout checking for a NULL pointer). If the file node has type CV_NODE_INT, then node-\\>data.i is\nreturned. If the file node has type CV_NODE_REAL, then node-\\>data.f is converted to an integer\nand returned. Otherwise the error is reported.\n@param node File node\n@param default_value The value that is returned if node is NULL\n */\nCV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) )\n{\n    return !node ? default_value :\n        CV_NODE_IS_INT(node->tag) ? node->data.i :\n        CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff;\n}\n\n/** @brief Finds a file node and returns its value.\n\nThe function is a simple superposition of cvGetFileNodeByName and cvReadInt.\n@param fs File storage\n@param map The parent map. 
If it is NULL, the function searches a top-level node.\n@param name The node name\n@param default_value The value that is returned if the file node is not found\n */\nCV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map,\n                         const char* name, int default_value CV_DEFAULT(0) )\n{\n    return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value );\n}\n\n/** @brief Retrieves a floating-point value from a file node.\n\nThe function returns a floating-point value that is represented by the file node. If the file node\nis NULL, the default_value is returned (thus, it is convenient to call the function right after\ncvGetFileNode without checking for a NULL pointer). If the file node has type CV_NODE_REAL ,\nthen node-\>data.f is returned. If the file node has type CV_NODE_INT , then node-\>data.i\nis converted to floating-point and returned. Otherwise the result is undefined.\n@param node File node\n@param default_value The value that is returned if node is NULL\n */\nCV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) )\n{\n    return !node ? default_value :\n        CV_NODE_IS_INT(node->tag) ? (double)node->data.i :\n        CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300;\n}\n\n/** @brief Finds a file node and returns its value.\n\nThe function is a simple superposition of cvGetFileNodeByName and cvReadReal .\n@param fs File storage\n@param map The parent map. If it is NULL, the function searches a top-level node.\n@param name The node name\n@param default_value The value that is returned if the file node is not found\n */\nCV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map,\n                        const char* name, double default_value CV_DEFAULT(0.) )\n{\n    return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value );\n}\n\n/** @brief Retrieves a text string from a file node.\n\nThe function returns a text string that is represented by the file node. If the file node is NULL,\nthe default_value is returned (thus, it is convenient to call the function right after\ncvGetFileNode without checking for a NULL pointer). If the file node has type CV_NODE_STR , then\nnode-\>data.str.ptr is returned. Otherwise the result is undefined.\n@param node File node\n@param default_value The value that is returned if node is NULL\n */\nCV_INLINE const char* cvReadString( const CvFileNode* node,\n                        const char* default_value CV_DEFAULT(NULL) )\n{\n    return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0;\n}\n\n/** @brief Finds a file node by its name and returns its value.\n\nThe function is a simple superposition of cvGetFileNodeByName and cvReadString .\n@param fs File storage\n@param map The parent map. If it is NULL, the function searches a top-level node.\n@param name The node name\n@param default_value The value that is returned if the file node is not found\n */\nCV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map,\n                        const char* name, const char* default_value CV_DEFAULT(NULL) )\n{\n    return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value );\n}\n\n\n/** @brief Decodes an object and returns a pointer to it.\n\nThe function decodes a user object (creates an object in a native representation from the file\nstorage subtree) and returns it. 
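For example, the matrix written under the name \"A\" in the cvWrite example above could be decoded like this (a minimal sketch, assuming example.yml was written beforehand):\n@code\n    CvFileStorage* fs = cvOpenFileStorage( \"example.yml\", 0, CV_STORAGE_READ );\n    CvFileNode* node = cvGetFileNodeByName( fs, 0, \"A\" );\n    CvMat* mat = (CvMat*)cvRead( fs, node, 0 );\n    // ... use the matrix, then release it and close the storage\n    cvReleaseMat( &mat );\n    cvReleaseFileStorage( &fs );\n@endcode\n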
The object to be decoded must be an instance of a registered type\nthat supports the read method (see CvTypeInfo). The type of the object is determined by the type\nname that is encoded in the file. If the object is a dynamic structure, it is created either in\nthe memory storage that was passed to cvOpenFileStorage or, if a NULL pointer was passed, in temporary\nmemory storage, which is released when cvReleaseFileStorage is called. Otherwise, if the object is\nnot a dynamic structure, it is created in a heap and should be released with a specialized function\nor by using the generic cvRelease.\n@param fs File storage\n@param node The root object node\n@param attributes Unused parameter\n */\nCVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node,\n                        CvAttrList* attributes CV_DEFAULT(NULL));\n\n/** @brief Finds an object by name and decodes it.\n\nThe function is a simple superposition of cvGetFileNodeByName and cvRead.\n@param fs File storage\n@param map The parent map. If it is NULL, the function searches a top-level node.\n@param name The node name\n@param attributes Unused parameter\n */\nCV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map,\n                              const char* name, CvAttrList* attributes CV_DEFAULT(NULL) )\n{\n    return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes );\n}\n\n\n/** @brief Initializes the file node sequence reader.\n\nThe function initializes the sequence reader to read data from a file node. The initialized reader\ncan then be passed to cvReadRawDataSlice.\n@param fs File storage\n@param src The file node (a sequence) to read numbers from\n@param reader Pointer to the sequence reader\n */\nCVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src,\n                               CvSeqReader* reader );\n\n/** @brief Reads one or more elements from a file node sequence.\n\nThe function reads one or more elements from the file node, representing a sequence, to a\nuser-specified array. The total number of read sequence elements is a product of count and the\nnumber of components in each array element. For example, if dt=2if, the function will read count\*3\nsequence elements. As with any sequence, some parts of the file node sequence can be skipped or read\nrepeatedly by repositioning the reader using cvSetSeqReaderPos.\n@param fs File storage\n@param reader The sequence reader. Initialize it with cvStartReadRawData .\n@param count The number of elements to read\n@param dst Pointer to the destination array\n@param dt Specification of each array element. It has the same format as in cvWriteRawData .\n */\nCVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader,\n                               int count, void* dst, const char* dt );\n\n/** @brief Reads multiple numbers.\n\nThe function reads elements from a file node that represents a sequence of scalars.\n@param fs File storage\n@param src The file node (a sequence) to read numbers from\n@param dst Pointer to the destination array\n@param dt Specification of each array element. It has the same format as in cvWriteRawData .\n */\nCVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src,\n                          void* dst, const char* dt );\n\n/** @brief Writes a file node to another file storage.\n\nThe function writes a copy of a file node to file storage. 
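For example (a sketch with hypothetical file names, copying the first top-level stream of one storage into another):\n@code\n    CvFileStorage* src = cvOpenFileStorage( \"in.xml\", 0, CV_STORAGE_READ );\n    CvFileStorage* dst = cvOpenFileStorage( \"out.yml\", 0, CV_STORAGE_WRITE );\n    // embed != 0: the elements of the root node are written directly into dst\n    cvWriteFileNode( dst, 0, cvGetRootFileNode( src, 0 ), 1 );\n    cvReleaseFileStorage( &dst );\n    cvReleaseFileStorage( &src );\n@endcode\n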
Possible applications of the function are\nmerging several file storages into one and conversion between XML and YAML formats.\n@param fs Destination file storage\n@param new_node_name New name of the file node in the destination file storage. To keep the\nexisting name, use cvGetFileNodeName\n@param node The written node\n@param embed If the written node is a collection and this parameter is not zero, no extra level of\nhierarchy is created. Instead, all the elements of node are written into the currently written\nstructure. Of course, map elements can only be embedded into another map, and sequence elements\ncan only be embedded into another sequence.\n */\nCVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name,\n                            const CvFileNode* node, int embed );\n\n/** @brief Returns the name of a file node.\n\nThe function returns the name of a file node or NULL, if the file node does not have a name or if\nnode is NULL.\n@param node File node\n */\nCVAPI(const char*) cvGetFileNodeName( const CvFileNode* node );\n\n/*********************************** Adding own types ***********************************/\n\n/** @brief Registers a new type.\n\nThe function registers a new type, which is described by info . The function creates a copy of the\nstructure, so the user should delete it after calling the function.\n@param info Type info structure\n */\nCVAPI(void) cvRegisterType( const CvTypeInfo* info );\n\n/** @brief Unregisters the type.\n\nThe function unregisters a type with a specified name. If the name is unknown, it is possible to\nlocate the type info by an instance of the type using cvTypeOf or by iterating the type list,\nstarting from cvFirstType, and then calling cvUnregisterType(info-\>typeName).\n@param type_name Name of the type to unregister\n */\nCVAPI(void) cvUnregisterType( const char* type_name );\n\n/** @brief Returns the beginning of a type list.\n\nThe function returns the first type in the list of registered types. Navigation through the list can\nbe done via the prev and next fields of the CvTypeInfo structure.\n */\nCVAPI(CvTypeInfo*) cvFirstType(void);\n\n/** @brief Finds a type by its name.\n\nThe function finds a registered type by its name. It returns NULL if there is no type with the\nspecified name.\n@param type_name Type name\n */\nCVAPI(CvTypeInfo*) cvFindType( const char* type_name );\n\n/** @brief Returns the type of an object.\n\nThe function finds the type of a given object. It iterates through the list of registered types and\ncalls the is_instance function/method for every type info structure with that object until one of\nthem returns non-zero or until the whole list has been traversed. In the latter case, the function\nreturns NULL.\n@param struct_ptr The object pointer\n */\nCVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr );\n\n/** @brief Releases an object.\n\nThe function finds the type of a given object and calls release with the double pointer.\n@param struct_ptr Double pointer to the object\n */\nCVAPI(void) cvRelease( void** struct_ptr );\n\n/** @brief Makes a clone of an object.\n\nThe function finds the type of a given object and calls clone with the passed object. Of course, if\nyou know the object type, for example, struct_ptr is CvMat\*, it is faster to call the specific\nfunction, like cvCloneMat.\n@param struct_ptr The object to clone\n */\nCVAPI(void*) cvClone( const void* struct_ptr );\n\n/** @brief Saves an object to a file.\n\nThe function saves an object to a file. 
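A minimal round-trip sketch (hypothetical file name; cvLoad is declared below):\n@code\n    CvMat* mat = cvCreateMat( 2, 2, CV_32F );\n    cvSetIdentity( mat );\n    cvSave( \"mat.xml\", mat, 0, 0, cvAttrList() );       // name is derived from the file name\n    CvMat* mat2 = (CvMat*)cvLoad( \"mat.xml\", 0, 0, 0 ); // matrices need no memstorage\n    cvReleaseMat( &mat2 );\n    cvReleaseMat( &mat );\n@endcode\n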
It provides a simple interface to cvWrite .\n@param filename File name\n@param struct_ptr Object to save\n@param name Optional object name. If it is NULL, the name will be formed from filename .\n@param comment Optional comment to put at the beginning of the file\n@param attributes Optional attributes passed to cvWrite\n */\nCVAPI(void) cvSave( const char* filename, const void* struct_ptr,\n                    const char* name CV_DEFAULT(NULL),\n                    const char* comment CV_DEFAULT(NULL),\n                    CvAttrList attributes CV_DEFAULT(cvAttrList()));\n\n/** @brief Loads an object from a file.\n\nThe function loads an object from a file. It basically reads the specified file, finds the first\ntop-level node and calls cvRead for that node. If the file node does not have type information or\nthe type information cannot be found by the type name, the function returns NULL. After the object\nis loaded, the file storage is closed and all the temporary buffers are deleted. Thus, to load a\ndynamic structure, such as a sequence, contour, or graph, one should pass a valid memory storage\ndestination to the function.\n@param filename File name\n@param memstorage Memory storage for dynamic structures, such as CvSeq or CvGraph . It is not used\nfor matrices or images.\n@param name Optional object name. If it is NULL, the first top-level object in the storage will be\nloaded.\n@param real_name Optional output parameter that will contain the name of the loaded object\n(useful if name=NULL )\n */\nCVAPI(void*) cvLoad( const char* filename,\n                     CvMemStorage* memstorage CV_DEFAULT(NULL),\n                     const char* name CV_DEFAULT(NULL),\n                     const char** real_name CV_DEFAULT(NULL) );\n\n/*********************************** Measuring Execution Time ***************************/\n\n/** helper functions for RNG initialization and accurate time measurement:\n   uses internal clock counter on x86 */\nCVAPI(int64)  cvGetTickCount( void );\nCVAPI(double) cvGetTickFrequency( void );\n\n/*********************************** CPU capabilities ***********************************/\n\nCVAPI(int) cvCheckHardwareSupport(int feature);\n\n/*********************************** Multi-Threading ************************************/\n\n/** retrieve/set the number of threads used in OpenMP implementations */\nCVAPI(int)  cvGetNumThreads( void );\nCVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) );\n/** get index of the thread being executed */\nCVAPI(int)  cvGetThreadNum( void );\n\n\n/********************************** Error Handling **************************************/\n\n/** Get current OpenCV error status */\nCVAPI(int) cvGetErrStatus( void );\n\n/** Sets error status silently */\nCVAPI(void) cvSetErrStatus( int status );\n\n#define CV_ErrModeLeaf     0   /* Print error and exit program */\n#define CV_ErrModeParent   1   /* Print error and continue */\n#define CV_ErrModeSilent   2   /* Don't print and continue */\n\n/** Retrieves current error processing mode */\nCVAPI(int)  cvGetErrMode( void );\n\n/** Sets error processing mode, returns previously used mode */\nCVAPI(int) cvSetErrMode( int mode );\n\n/** Sets error status and performs some additional actions (displaying message box,\n writing message to stderr, terminating application etc.)\n depending on the current error mode */\nCVAPI(void) cvError( int status, const char* func_name,\n                    const char* err_msg, const char* file_name, int line );\n\n/** Retrieves textual description of the 
error given its code */\nCVAPI(const char*) cvErrorStr( int status );\n\n/** Retrieves detailed information about the last error that occurred */\nCVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description,\n                        const char** filename, int* line );\n\n/** Maps IPP error codes to the counterparts from OpenCV */\nCVAPI(int) cvErrorFromIppStatus( int ipp_status );\n\ntypedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name,\n                                        const char* err_msg, const char* file_name, int line, void* userdata );\n\n/** Assigns a new error-handling function */\nCVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler,\n                                       void* userdata CV_DEFAULT(NULL),\n                                       void** prev_userdata CV_DEFAULT(NULL) );\n\n/** Outputs nothing */\nCVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg,\n                          const char* file_name, int line, void* userdata );\n\n/** Outputs to the console (fprintf(stderr, ...)) */\nCVAPI(int) cvStdErrReport( int status, const char* func_name, const char* err_msg,\n                          const char* file_name, int line, void* userdata );\n\n/** Outputs to a MessageBox (WIN32) */\nCVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg,\n                          const char* file_name, int line, void* userdata );\n\n#define OPENCV_ERROR(status,func,context)                           \\\ncvError((status),(func),(context),__FILE__,__LINE__)\n\n#define OPENCV_ASSERT(expr,func,context)                            \\\n{if (! (expr))                                      \\\n{OPENCV_ERROR(CV_StsInternal,(func),(context));}}\n\n#define OPENCV_CALL( Func )                                         \\\n{                                                                   \\\nFunc;                                                           \\\n}\n\n\n/** CV_FUNCNAME macro defines the cvFuncName constant which is used by the CV_ERROR macro */\n#ifdef CV_NO_FUNC_NAMES\n#define CV_FUNCNAME( Name )\n#define cvFuncName \"\"\n#else\n#define CV_FUNCNAME( Name )  \\\nstatic char cvFuncName[] = Name\n#endif\n\n\n/**\n CV_ERROR macro unconditionally raises an error with the passed code and message.\n After raising the error, control is transferred to the exit label.\n */\n#define CV_ERROR( Code, Msg )                                       \\\n{                                                                   \\\n    cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ );        \\\n    __CV_EXIT__;                                                   \\\n}\n\n/**\n CV_CHECK macro checks the error status after a CV (or IPL)\n function call. If an error is detected, control is transferred to the exit\n label.\n */\n#define CV_CHECK()                                                  \\\n{                                                                   \\\n    if( cvGetErrStatus() < 0 )                                      \\\n        CV_ERROR( CV_StsBackTrace, \"Inner function failed.\" );      \\\n}\n\n\n/**\n CV_CALL macro calls a CV (or IPL) function, checks the error status and\n signals an error if the function failed. 
Useful in \"parent\"\n error processing mode (see CV_ErrModeParent)\n */\n#define CV_CALL( Func )                                             \\\n{                                                                   \\\n    Func;                                                           \\\n    CV_CHECK();                                                     \\\n}\n\n\n/** Runtime assertion macro */\n#define CV_ASSERT( Condition )                                          \\\n{                                                                       \\\n    if( !(Condition) )                                                  \\\n        CV_ERROR( CV_StsInternal, \"Assertion: \" #Condition \" failed\" ); \\\n}\n\n#define __CV_BEGIN__       {\n#define __CV_END__         goto exit; exit: ; }\n#define __CV_EXIT__        goto exit\n\n/** @} core_c */\n\n#ifdef __cplusplus\n} // extern \"C\"\n#endif\n\n#ifdef __cplusplus\n\n//! @addtogroup core_c_glue\n//! @{\n\n//! class for automatic module/RTTI data registration/unregistration\nstruct CV_EXPORTS CvType\n{\n    CvType( const char* type_name,\n            CvIsInstanceFunc is_instance, CvReleaseFunc release=0,\n            CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 );\n    ~CvType();\n    CvTypeInfo* info;\n\n    static CvTypeInfo* first;\n    static CvTypeInfo* last;\n};\n\n//! @}\n\n#include \"opencv2/core/utility.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup core_c_glue\n//! @{\n\n/////////////////////////////////////////// glue ///////////////////////////////////////////\n\n//! converts array (CvMat or IplImage) to cv::Mat\nCV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false,\n                          bool allowND=true, int coiMode=0,\n                          AutoBuffer<double>* buf=0);\n\nstatic inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0)\n{\n    return cvarrToMat(arr, copyData, true, coiMode);\n}\n\n\n//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it.\nCV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1);\n//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage\nCV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1);\n\n\n\n////// specialized implementations of DefaultDeleter::operator() for classic OpenCV types //////\n\ntemplate<> CV_EXPORTS void DefaultDeleter<CvMat>::operator ()(CvMat* obj) const;\ntemplate<> CV_EXPORTS void DefaultDeleter<IplImage>::operator ()(IplImage* obj) const;\ntemplate<> CV_EXPORTS void DefaultDeleter<CvMatND>::operator ()(CvMatND* obj) const;\ntemplate<> CV_EXPORTS void DefaultDeleter<CvSparseMat>::operator ()(CvSparseMat* obj) const;\ntemplate<> CV_EXPORTS void DefaultDeleter<CvMemStorage>::operator ()(CvMemStorage* obj) const;\n\n////////////// convenient wrappers for operating old-style dynamic structures //////////////\n\ntemplate<typename _Tp> class SeqIterator;\n\ntypedef Ptr<CvMemStorage> MemStorage;\n\n/*!\n Template Sequence Class derived from CvSeq\n\n The class provides more convenient access to sequence elements,\n STL-style operations and iterators.\n\n \\note The class is targeted for simple data types,\n    i.e. no constructors or destructors\n    are called for the sequence elements.\n*/\ntemplate<typename _Tp> class Seq\n{\npublic:\n    typedef SeqIterator<_Tp> iterator;\n    typedef SeqIterator<_Tp> const_iterator;\n\n    //! the default constructor\n    Seq();\n    //! the constructor for wrapping CvSeq structure. 
The real element type in CvSeq should match _Tp.\n    Seq(const CvSeq* seq);\n    //! creates the empty sequence that resides in the specified storage\n    Seq(MemStorage& storage, int headerSize = sizeof(CvSeq));\n    //! returns read-write reference to the specified element\n    _Tp& operator [](int idx);\n    //! returns read-only reference to the specified element\n    const _Tp& operator[](int idx) const;\n    //! returns iterator pointing to the beginning of the sequence\n    SeqIterator<_Tp> begin() const;\n    //! returns iterator pointing to the element following the last sequence element\n    SeqIterator<_Tp> end() const;\n    //! returns the number of elements in the sequence\n    size_t size() const;\n    //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...)\n    int type() const;\n    //! returns the depth of sequence elements (CV_8U ... CV_64F)\n    int depth() const;\n    //! returns the number of channels in each sequence element\n    int channels() const;\n    //! returns the size of each sequence element\n    size_t elemSize() const;\n    //! returns index of the specified sequence element\n    size_t index(const _Tp& elem) const;\n    //! appends the specified element to the end of the sequence\n    void push_back(const _Tp& elem);\n    //! appends the specified element to the front of the sequence\n    void push_front(const _Tp& elem);\n    //! appends zero or more elements to the end of the sequence\n    void push_back(const _Tp* elems, size_t count);\n    //! appends zero or more elements to the front of the sequence\n    void push_front(const _Tp* elems, size_t count);\n    //! inserts the specified element to the specified position\n    void insert(int idx, const _Tp& elem);\n    //! inserts zero or more elements to the specified position\n    void insert(int idx, const _Tp* elems, size_t count);\n    //! removes element at the specified position\n    void remove(int idx);\n    //! removes the specified subsequence\n    void remove(const Range& r);\n\n    //! returns reference to the first sequence element\n    _Tp& front();\n    //! returns read-only reference to the first sequence element\n    const _Tp& front() const;\n    //! returns reference to the last sequence element\n    _Tp& back();\n    //! returns read-only reference to the last sequence element\n    const _Tp& back() const;\n    //! returns true iff the sequence contains no elements\n    bool empty() const;\n\n    //! removes all the elements from the sequence\n    void clear();\n    //! removes the first element from the sequence\n    void pop_front();\n    //! removes the last element from the sequence\n    void pop_back();\n    //! removes zero or more elements from the beginning of the sequence\n    void pop_front(_Tp* elems, size_t count);\n    //! removes zero or more elements from the end of the sequence\n    void pop_back(_Tp* elems, size_t count);\n\n    //! copies the whole sequence or the sequence slice to the specified vector\n    void copyTo(std::vector<_Tp>& vec, const Range& range=Range::all()) const;\n    //! returns the vector containing all the sequence elements\n    operator std::vector<_Tp>() const;\n\n    CvSeq* seq;\n};\n\n\n/*!\n STL-style Sequence Iterator inherited from the CvSeqReader structure\n*/\ntemplate<typename _Tp> class SeqIterator : public CvSeqReader\n{\npublic:\n    //! the default constructor\n    SeqIterator();\n    //! 
the constructor setting the iterator to the beginning or to the end of the sequence\n    SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false);\n    //! positions the iterator within the sequence\n    void seek(size_t pos);\n    //! reports the current iterator position\n    size_t tell() const;\n    //! returns reference to the current sequence element\n    _Tp& operator *();\n    //! returns read-only reference to the current sequence element\n    const _Tp& operator *() const;\n    //! moves iterator to the next sequence element\n    SeqIterator& operator ++();\n    //! moves iterator to the next sequence element\n    SeqIterator operator ++(int) const;\n    //! moves iterator to the previous sequence element\n    SeqIterator& operator --();\n    //! moves iterator to the previous sequence element\n    SeqIterator operator --(int) const;\n\n    //! moves iterator forward by the specified offset (possibly negative)\n    SeqIterator& operator +=(int);\n    //! moves iterator backward by the specified offset (possibly negative)\n    SeqIterator& operator -=(int);\n\n    // this is the index of the current element modulo seq->total*2\n    // (to distinguish between 0 and seq->total)\n    int index;\n};\n\n\n\n// bridge C++ => C Seq API\nCV_EXPORTS schar*  seqPush( CvSeq* seq, const void* element=0);\nCV_EXPORTS schar*  seqPushFront( CvSeq* seq, const void* element=0);\nCV_EXPORTS void  seqPop( CvSeq* seq, void* element=0);\nCV_EXPORTS void  seqPopFront( CvSeq* seq, void* element=0);\nCV_EXPORTS void  seqPopMulti( CvSeq* seq, void* elements,\n                              int count, int in_front=0 );\nCV_EXPORTS void  seqRemove( CvSeq* seq, int index );\nCV_EXPORTS void  clearSeq( CvSeq* seq );\nCV_EXPORTS schar*  getSeqElem( const CvSeq* seq, int index );\nCV_EXPORTS void  seqRemoveSlice( CvSeq* seq, CvSlice slice );\nCV_EXPORTS void  seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );\n\ntemplate<typename _Tp> inline Seq<_Tp>::Seq() : seq(0) {}\ntemplate<typename _Tp> inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq)\n{\n    CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp));\n}\n\ntemplate<typename _Tp> inline Seq<_Tp>::Seq( MemStorage& storage,\n                                             int headerSize )\n{\n    CV_Assert(headerSize >= (int)sizeof(CvSeq));\n    seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage);\n}\n\ntemplate<typename _Tp> inline _Tp& Seq<_Tp>::operator [](int idx)\n{ return *(_Tp*)getSeqElem(seq, idx); }\n\ntemplate<typename _Tp> inline const _Tp& Seq<_Tp>::operator [](int idx) const\n{ return *(_Tp*)getSeqElem(seq, idx); }\n\ntemplate<typename _Tp> inline SeqIterator<_Tp> Seq<_Tp>::begin() const\n{ return SeqIterator<_Tp>(*this); }\n\ntemplate<typename _Tp> inline SeqIterator<_Tp> Seq<_Tp>::end() const\n{ return SeqIterator<_Tp>(*this, true); }\n\ntemplate<typename _Tp> inline size_t Seq<_Tp>::size() const\n{ return seq ? seq->total : 0; }\n\ntemplate<typename _Tp> inline int Seq<_Tp>::type() const\n{ return seq ? CV_MAT_TYPE(seq->flags) : 0; }\n\ntemplate<typename _Tp> inline int Seq<_Tp>::depth() const\n{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; }\n\ntemplate<typename _Tp> inline int Seq<_Tp>::channels() const\n{ return seq ? CV_MAT_CN(seq->flags) : 0; }\n\ntemplate<typename _Tp> inline size_t Seq<_Tp>::elemSize() const\n{ return seq ? 
seq->elem_size : 0; }\n\ntemplate<typename _Tp> inline size_t Seq<_Tp>::index(const _Tp& elem) const\n{ return cvSeqElemIdx(seq, &elem); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::push_back(const _Tp& elem)\n{ cvSeqPush(seq, &elem); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::push_front(const _Tp& elem)\n{ cvSeqPushFront(seq, &elem); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count)\n{ cvSeqPushMulti(seq, elem, (int)count, 0); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count)\n{ cvSeqPushMulti(seq, elem, (int)count, 1); }\n\ntemplate<typename _Tp> inline _Tp& Seq<_Tp>::back()\n{ return *(_Tp*)getSeqElem(seq, -1); }\n\ntemplate<typename _Tp> inline const _Tp& Seq<_Tp>::back() const\n{ return *(const _Tp*)getSeqElem(seq, -1); }\n\ntemplate<typename _Tp> inline _Tp& Seq<_Tp>::front()\n{ return *(_Tp*)getSeqElem(seq, 0); }\n\ntemplate<typename _Tp> inline const _Tp& Seq<_Tp>::front() const\n{ return *(const _Tp*)getSeqElem(seq, 0); }\n\ntemplate<typename _Tp> inline bool Seq<_Tp>::empty() const\n{ return !seq || seq->total == 0; }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::clear()\n{ if(seq) clearSeq(seq); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::pop_back()\n{ seqPop(seq); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::pop_front()\n{ seqPopFront(seq); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count)\n{ seqPopMulti(seq, elem, (int)count, 0); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::pop_front(_Tp* elem, size_t count)\n{ seqPopMulti(seq, elem, (int)count, 1); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp& elem)\n{ seqInsert(seq, idx, &elem); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count)\n{\n    CvMat m = cvMat(1, count, DataType<_Tp>::type, elems);\n    seqInsertSlice(seq, idx, &m);\n}\n\ntemplate<typename _Tp> inline void Seq<_Tp>::remove(int idx)\n{ seqRemove(seq, idx); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::remove(const Range& r)\n{ seqRemoveSlice(seq, cvSlice(r.start, r.end)); }\n\ntemplate<typename _Tp> inline void Seq<_Tp>::copyTo(std::vector<_Tp>& vec, const Range& range) const\n{\n    size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start;\n    vec.resize(len);\n    if( seq && len )\n        cvCvtSeqToArray(seq, &vec[0], range);\n}\n\ntemplate<typename _Tp> inline Seq<_Tp>::operator std::vector<_Tp>() const\n{\n    std::vector<_Tp> vec;\n    copyTo(vec);\n    return vec;\n}\n\ntemplate<typename _Tp> inline SeqIterator<_Tp>::SeqIterator()\n{ memset(this, 0, sizeof(*this)); }\n\ntemplate<typename _Tp> inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& _seq, bool seekEnd)\n{\n    cvStartReadSeq(_seq.seq, this);\n    index = seekEnd ? 
_seq.seq->total : 0;\n}\n\ntemplate<typename _Tp> inline void SeqIterator<_Tp>::seek(size_t pos)\n{\n    cvSetSeqReaderPos(this, (int)pos, false);\n    index = pos;\n}\n\ntemplate<typename _Tp> inline size_t SeqIterator<_Tp>::tell() const\n{ return index; }\n\ntemplate<typename _Tp> inline _Tp& SeqIterator<_Tp>::operator *()\n{ return *(_Tp*)ptr; }\n\ntemplate<typename _Tp> inline const _Tp& SeqIterator<_Tp>::operator *() const\n{ return *(const _Tp*)ptr; }\n\ntemplate<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++()\n{\n    CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this);\n    if( ++index >= seq->total*2 )\n        index = 0;\n    return *this;\n}\n\ntemplate<typename _Tp> inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const\n{\n    SeqIterator<_Tp> it = *this;\n    ++*this;\n    return it;\n}\n\ntemplate<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --()\n{\n    CV_PREV_SEQ_ELEM(sizeof(_Tp), *this);\n    if( --index < 0 )\n        index = seq->total*2-1;\n    return *this;\n}\n\ntemplate<typename _Tp> inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const\n{\n    SeqIterator<_Tp> it = *this;\n    --*this;\n    return it;\n}\n\ntemplate<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta)\n{\n    cvSetSeqReaderPos(this, delta, 1);\n    index += delta;\n    int n = seq->total*2;\n    if( index < 0 )\n        index += n;\n    if( index >= n )\n        index -= n;\n    return *this;\n}\n\ntemplate<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta)\n{\n    return (*this += -delta);\n}\n\ntemplate<typename _Tp> inline ptrdiff_t operator - (const SeqIterator<_Tp>& a,\n                                                    const SeqIterator<_Tp>& b)\n{\n    ptrdiff_t delta = a.index - b.index, n = a.seq->total;\n    if( delta > n || delta < -n )\n        delta += delta < 0 ? n : -n;\n    return delta;\n}\n\ntemplate<typename _Tp> inline bool operator == (const SeqIterator<_Tp>& a,\n                                                const SeqIterator<_Tp>& b)\n{\n    return a.seq == b.seq && a.index == b.index;\n}\n\ntemplate<typename _Tp> inline bool operator != (const SeqIterator<_Tp>& a,\n                                                const SeqIterator<_Tp>& b)\n{\n    return !(a == b);\n}\n\n//! @}\n\n} // cv\n\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/block.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_DEVICE_BLOCK_HPP__\n#define __OPENCV_CUDA_DEVICE_BLOCK_HPP__\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    struct Block\n    {\n        static __device__ __forceinline__ unsigned int id()\n        {\n            return blockIdx.x;\n        }\n\n        static __device__ __forceinline__ unsigned int stride()\n        {\n            return blockDim.x * blockDim.y * blockDim.z;\n        }\n\n        static __device__ __forceinline__ void sync()\n        {\n            __syncthreads();\n        }\n\n        static __device__ __forceinline__ int flattenedThreadId()\n        {\n            return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;\n        }\n\n        template<typename It, typename T>\n        static __device__ __forceinline__ void fill(It beg, It end, const T& value)\n        {\n            int STRIDE = stride();\n            It t = beg + flattenedThreadId();\n\n            for(; t < end; t += STRIDE)\n                *t = value;\n        }\n\n        template<typename OutIt, typename T>\n        static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)\n        {\n            int STRIDE = stride();\n            int tid = flattenedThreadId();\n            value += tid;\n\n            for(OutIt t = beg + tid; t < end; t += STRIDE, value += STRIDE)\n                *t = value;\n        }\n\n        template<typename InIt, typename OutIt>\n        static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out)\n        {\n            int STRIDE = stride();\n            InIt  t = beg + flattenedThreadId();\n            OutIt o = out + (t - beg);\n\n            for(; t < end; t += STRIDE, o += STRIDE)\n                *o = *t;\n        }\n\n        template<typename InIt, typename OutIt, class UnOp>\n        static __device__ __forceinline__ void transfrom(InIt beg, InIt end, OutIt out, UnOp op)\n        {\n            int STRIDE = stride();\n            InIt  t = beg + flattenedThreadId();\n            OutIt o = out + (t - beg);\n\n            for(; t < end; t += STRIDE, o += STRIDE)\n                *o = op(*t);\n        }\n\n        template<typename InIt1, typename InIt2, typename OutIt, class BinOp>\n        static __device__ __forceinline__ void transfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)\n        {\n            int STRIDE = stride();\n            InIt1 t1 = beg1 + flattenedThreadId();\n            InIt2 t2 = beg2 + flattenedThreadId();\n            OutIt o  = out + (t1 - beg1);\n\n            for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)\n                *o = op(*t1, *t2);\n        }\n\n        template<int CTA_SIZE, typename T, class BinOp>\n        static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op)\n        {\n            int tid = flattenedThreadId();\n            T val =  buffer[tid];\n\n            if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }\n            if (CTA_SIZE >=  512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }\n            if (CTA_SIZE >=  256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }\n            if (CTA_SIZE >=  128) { if (tid <  64) buffer[tid] = val = op(val, buffer[tid +  64]); __syncthreads(); }\n\n            if (tid < 32)\n            {\n                if (CTA_SIZE >=   64) { buffer[tid] = val = op(val, buffer[tid +  32]); }\n                if (CTA_SIZE >=   32) { buffer[tid] = val = op(val, buffer[tid +  16]); }\n              
  if (CTA_SIZE >=   16) { buffer[tid] = val = op(val, buffer[tid +   8]); }\n                if (CTA_SIZE >=    8) { buffer[tid] = val = op(val, buffer[tid +   4]); }\n                if (CTA_SIZE >=    4) { buffer[tid] = val = op(val, buffer[tid +   2]); }\n                if (CTA_SIZE >=    2) { buffer[tid] = val = op(val, buffer[tid +   1]); }\n            }\n        }\n\n        template<int CTA_SIZE, typename T, class BinOp>\n        static __device__ __forceinline__ T reduce(volatile T* buffer, T init, BinOp op)\n        {\n            int tid = flattenedThreadId();\n            T val =  buffer[tid] = init;\n            __syncthreads();\n\n            if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }\n            if (CTA_SIZE >=  512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }\n            if (CTA_SIZE >=  256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }\n            if (CTA_SIZE >=  128) { if (tid <  64) buffer[tid] = val = op(val, buffer[tid +  64]); __syncthreads(); }\n\n            if (tid < 32)\n            {\n                if (CTA_SIZE >=   64) { buffer[tid] = val = op(val, buffer[tid +  32]); }\n                if (CTA_SIZE >=   32) { buffer[tid] = val = op(val, buffer[tid +  16]); }\n                if (CTA_SIZE >=   16) { buffer[tid] = val = op(val, buffer[tid +   8]); }\n                if (CTA_SIZE >=    8) { buffer[tid] = val = op(val, buffer[tid +   4]); }\n                if (CTA_SIZE >=    4) { buffer[tid] = val = op(val, buffer[tid +   2]); }\n                if (CTA_SIZE >=    2) { buffer[tid] = val = op(val, buffer[tid +   1]); }\n            }\n            __syncthreads();\n            return buffer[0];\n        }\n\n        template <typename T, class BinOp>\n        static __device__ __forceinline__ void reduce_n(T* data, unsigned int n, BinOp op)\n        {\n            int ftid = flattenedThreadId();\n            int sft = stride();\n\n            if (sft < n)\n            {\n                for (unsigned int i = sft + ftid; i < n; i += sft)\n                    data[ftid] = op(data[ftid], data[i]);\n\n                __syncthreads();\n\n                n = sft;\n            }\n\n            while (n > 1)\n            {\n                unsigned int half = n/2;\n\n                if (ftid < half)\n                    data[ftid] = op(data[ftid], data[n - ftid - 1]);\n\n                __syncthreads();\n\n                n = n - half;\n            }\n        }\n    };\n}}}\n\n//! @endcond\n\n#endif /* __OPENCV_CUDA_DEVICE_BLOCK_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/border_interpolate.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_BORDER_INTERPOLATE_HPP__\n#define __OPENCV_CUDA_BORDER_INTERPOLATE_HPP__\n\n#include \"saturate_cast.hpp\"\n#include \"vec_traits.hpp\"\n#include \"vec_math.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    //////////////////////////////////////////////////////////////\n    // BrdConstant\n\n    template <typename D> struct BrdRowConstant\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdRowConstant(int width_, const D& val_ = VecTraits<D>::all(0)) : width(width_), val(val_) {}\n\n        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const\n        {\n            return x >= 0 ? saturate_cast<D>(data[x]) : val;\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const\n        {\n            return x < width ? saturate_cast<D>(data[x]) : val;\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const\n        {\n            return (x >= 0 && x < width) ? 
saturate_cast<D>(data[x]) : val;\n        }\n\n        int width;\n        D val;\n    };\n\n    template <typename D> struct BrdColConstant\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdColConstant(int height_, const D& val_ = VecTraits<D>::all(0)) : height(height_), val(val_) {}\n\n        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const\n        {\n            return y >= 0 ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const\n        {\n            return y < height ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const\n        {\n            return (y >= 0 && y < height) ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;\n        }\n\n        int height;\n        D val;\n    };\n\n    template <typename D> struct BrdConstant\n    {\n        typedef D result_type;\n\n        __host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits<D>::all(0)) : height(height_), width(width_), val(val_)\n        {\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const\n        {\n            return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(((const T*)((const uchar*)data + y * step))[x]) : val;\n        }\n\n        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const\n        {\n            return (x >= 0 && x < width && y >= 0 && y < height) ? 
saturate_cast<D>(src(y, x)) : val;\n        }\n\n        int height;\n        int width;\n        D val;\n    };\n\n    //////////////////////////////////////////////////////////////\n    // BrdReplicate\n\n    template <typename D> struct BrdRowReplicate\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdRowReplicate(int width) : last_col(width - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdRowReplicate(int width, U) : last_col(width - 1) {}\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return ::max(x, 0);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return ::min(x, last_col);\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_low(idx_col_high(x));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_low(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_high(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col(x)]);\n        }\n\n        int last_col;\n    };\n\n    template <typename D> struct BrdColReplicate\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdColReplicate(int height) : last_row(height - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdColReplicate(int height, U) : last_row(height - 1) {}\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return ::max(y, 0);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return ::min(y, last_row);\n        }\n\n        __device__ __forceinline__ int idx_row(int y) const\n        {\n            return idx_row_low(idx_row_high(y));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const T*)((const char*)data + idx_row_low(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const T*)((const char*)data + idx_row_high(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const T*)((const char*)data + idx_row(y) * step));\n        }\n\n        int last_row;\n    };\n\n    template <typename D> struct BrdReplicate\n    {\n        typedef D result_type;\n\n        __host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return ::max(y, 0);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return ::min(y, last_row);\n        }\n\n        __device__ __forceinline__ 
int idx_row(int y) const\n        {\n            return idx_row_low(idx_row_high(y));\n        }\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return ::max(x, 0);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return ::min(x, last_col);\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_low(idx_col_high(x));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);\n        }\n\n        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const\n        {\n            return saturate_cast<D>(src(idx_row(y), idx_col(x)));\n        }\n\n        int last_row;\n        int last_col;\n    };\n\n    //////////////////////////////////////////////////////////////\n    // BrdReflect101\n\n    template <typename D> struct BrdRowReflect101\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdRowReflect101(int width) : last_col(width - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdRowReflect101(int width, U) : last_col(width - 1) {}\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return ::abs(x) % (last_col + 1);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_low(idx_col_high(x));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_low(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_high(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col(x)]);\n        }\n\n        int last_col;\n    };\n\n    template <typename D> struct BrdColReflect101\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdColReflect101(int height) : last_row(height - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdColReflect101(int height, U) : last_row(height - 1) {}\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return ::abs(y) % (last_row + 1);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);\n        }\n\n        __device__ __forceinline__ int idx_row(int y) const\n        {\n            return idx_row_low(idx_row_high(y));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const\n        {\n    
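        // 'step' is the row stride in bytes, hence the char* arithmetic\n    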
        return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));\n        }\n\n        int last_row;\n    };\n\n    template <typename D> struct BrdReflect101\n    {\n        typedef D result_type;\n\n        __host__ __device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return ::abs(y) % (last_row + 1);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);\n        }\n\n        __device__ __forceinline__ int idx_row(int y) const\n        {\n            return idx_row_low(idx_row_high(y));\n        }\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return ::abs(x) % (last_col + 1);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_low(idx_col_high(x));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);\n        }\n\n        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const\n        {\n            return saturate_cast<D>(src(idx_row(y), idx_col(x)));\n        }\n\n        int last_row;\n        int last_col;\n    };\n\n    //////////////////////////////////////////////////////////////\n    // BrdReflect\n\n    template <typename D> struct BrdRowReflect\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {}\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return (::abs(x) - (x < 0)) % (last_col + 1);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1);\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_high(::abs(x) - (x < 0));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_low(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_high(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col(x)]);\n        }\n\n        
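// index of the last valid column, i.e. width - 1\n        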
int last_col;\n    };\n\n    template <typename D> struct BrdColReflect\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {}\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return (::abs(y) - (y < 0)) % (last_row + 1);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1);\n        }\n\n        __device__ __forceinline__ int idx_row(int y) const\n        {\n            return idx_row_high(::abs(y) - (y < 0));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));\n        }\n\n        int last_row;\n    };\n\n    template <typename D> struct BrdReflect\n    {\n        typedef D result_type;\n\n        __host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return (::abs(y) - (y < 0)) % (last_row + 1);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/;\n        }\n\n        __device__ __forceinline__ int idx_row(int y) const\n        {\n            return idx_row_low(idx_row_high(y));\n        }\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return (::abs(x) - (x < 0)) % (last_col + 1);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return (last_col - ::abs(last_col - x) + (x > last_col));\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_low(idx_col_high(x));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);\n        }\n\n        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const\n        {\n            return saturate_cast<D>(src(idx_row(y), idx_col(x)));\n        }\n\n        int last_row;\n        int last_col;\n    };\n\n    //////////////////////////////////////////////////////////////\n    // BrdWrap\n\n    template <typename D> struct BrdRowWrap\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ 
__forceinline__ BrdRowWrap(int width_) : width(width_) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {}\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return (x < width) * x + (x >= width) * (x % width);\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_high(idx_col_low(x));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_low(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col_high(x)]);\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const\n        {\n            return saturate_cast<D>(data[idx_col(x)]);\n        }\n\n        int width;\n    };\n\n    template <typename D> struct BrdColWrap\n    {\n        typedef D result_type;\n\n        explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {}\n        template <typename U> __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {}\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return (y < height) * y + (y >= height) * (y % height);\n        }\n\n        __device__ __forceinline__ int idx_row(int y) const\n        {\n            return idx_row_high(idx_row_low(y));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));\n        }\n\n        int height;\n    };\n\n    template <typename D> struct BrdWrap\n    {\n        typedef D result_type;\n\n        __host__ __device__ __forceinline__ BrdWrap(int height_, int width_) :\n            height(height_), width(width_)\n        {\n        }\n        template <typename U>\n        __host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) :\n            height(height_), width(width_)\n        {\n        }\n\n        __device__ __forceinline__ int idx_row_low(int y) const\n        {\n            return (y >= 0) ? y : (y - ((y - height + 1) / height) * height);\n        }\n\n        __device__ __forceinline__ int idx_row_high(int y) const\n        {\n            return (y < height) ? 
y : (y % height);\n        }\n\n        __device__ __forceinline__ int idx_row(int y) const\n        {\n            return idx_row_high(idx_row_low(y));\n        }\n\n        __device__ __forceinline__ int idx_col_low(int x) const\n        {\n            return (x >= 0) ? x : (x - ((x - width + 1) / width) * width);\n        }\n\n        __device__ __forceinline__ int idx_col_high(int x) const\n        {\n            return (x < width) ? x : (x % width);\n        }\n\n        __device__ __forceinline__ int idx_col(int x) const\n        {\n            return idx_col_high(idx_col_low(x));\n        }\n\n        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const\n        {\n            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);\n        }\n\n        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const\n        {\n            return saturate_cast<D>(src(idx_row(y), idx_col(x)));\n        }\n\n        int height;\n        int width;\n    };\n\n    //////////////////////////////////////////////////////////////\n    // BorderReader\n\n    template <typename Ptr2D, typename B> struct BorderReader\n    {\n        typedef typename B::result_type elem_type;\n        typedef typename Ptr2D::index_type index_type;\n\n        __host__ __device__ __forceinline__ BorderReader(const Ptr2D& ptr_, const B& b_) : ptr(ptr_), b(b_) {}\n\n        __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const\n        {\n            return b.at(y, x, ptr);\n        }\n\n        Ptr2D ptr;\n        B b;\n    };\n\n    // Under Win32 there is a bug with templated types that are passed as kernel parameters;\n    // with this specialization everything works fine.\n    template <typename Ptr2D, typename D> struct BorderReader< Ptr2D, BrdConstant<D> >\n    {\n        typedef typename BrdConstant<D>::result_type elem_type;\n        typedef typename Ptr2D::index_type index_type;\n\n        __host__ __device__ __forceinline__ BorderReader(const Ptr2D& src_, const BrdConstant<D>& b) :\n            src(src_), height(b.height), width(b.width), val(b.val)\n        {\n        }\n\n        __device__ __forceinline__ D operator ()(index_type y, index_type x) const\n        {\n            return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;\n        }\n\n        Ptr2D src;\n        int height;\n        int width;\n        D val;\n    };\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_BORDER_INTERPOLATE_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/color.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_COLOR_HPP__\n#define __OPENCV_CUDA_COLOR_HPP__\n\n#include \"detail/color_detail.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    // All OPENCV_CUDA_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements\n    // template <typename T> class ColorSpace1_to_ColorSpace2_traits\n    // {\n    //     typedef ... 
functor_type;\n    //     static __host__ __device__ functor_type create_functor();\n    // };\n\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3)\n    OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4)\n\n    #undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5)\n    OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6)\n\n    #undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5)\n    OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 
0)\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0)\n    
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2)\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2)\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0)\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0)\n    OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0)\n\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0)\n\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0)\n    
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0)\n\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0)\n    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS\n\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0)\n\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0)\n    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)\n\n    #undef OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_COLOR_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/common.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_COMMON_HPP__\n#define __OPENCV_CUDA_COMMON_HPP__\n\n#include <cuda_runtime.h>\n#include \"opencv2/core/cuda_types.hpp\"\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/base.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\n#ifndef CV_PI_F\n    #ifndef CV_PI\n        #define CV_PI_F 3.14159265f\n    #else\n        #define CV_PI_F ((float)CV_PI)\n    #endif\n#endif\n\nnamespace cv { namespace cuda {\n    static inline void checkCudaError(cudaError_t err, const char* file, const int line, const char* func)\n    {\n        if (cudaSuccess != err)\n            cv::error(cv::Error::GpuApiCallError, cudaGetErrorString(err), func, file, line);\n    }\n}}\n\n#ifndef cudaSafeCall\n    #define cudaSafeCall(expr)  cv::cuda::checkCudaError(expr, __FILE__, __LINE__, CV_Func)\n#endif\n\nnamespace cv { namespace cuda\n{\n    template <typename T> static inline bool isAligned(const T* ptr, size_t size)\n    {\n        return reinterpret_cast<size_t>(ptr) % size == 0;\n    }\n\n    static inline bool isAligned(size_t step, size_t size)\n    {\n        return step % size == 0;\n    }\n}}\n\nnamespace cv { namespace cuda\n{\n    namespace device\n    {\n        __host__ __device__ __forceinline__ int divUp(int total, int grain)\n        {\n            return (total + grain - 1) / grain;\n        }\n\n        template<class T> inline void bindTexture(const textureReference* tex, const PtrStepSz<T>& img)\n        {\n            cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();\n            cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) );\n        }\n    }\n}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_COMMON_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/datamov_utils.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_DATAMOV_UTILS_HPP__\n#define __OPENCV_CUDA_DATAMOV_UTILS_HPP__\n\n#include \"common.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200\n\n        // on Fermi the memory space is detected automatically\n        template <typename T> struct ForceGlob\n        {\n            __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val)  { val = ptr[offset];  }\n        };\n\n    #else // __CUDA_ARCH__ >= 200\n\n        #if defined(_WIN64) || defined(__LP64__)\n            // 64-bit register modifier for inlined asm\n            #define OPENCV_CUDA_ASM_PTR \"l\"\n        #else\n            // 32-bit register modifier for inlined asm\n            #define OPENCV_CUDA_ASM_PTR \"r\"\n        #endif\n\n        template<class T> struct ForceGlob;\n\n        #define OPENCV_CUDA_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \\\n            template <> struct ForceGlob<base_type> \\\n            { \\\n                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \\\n                { \\\n                    asm(\"ld.global.\"#ptx_type\" %0, [%1];\" : \"=\"#reg_mod(val) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \\\n                } \\\n            };\n\n        #define OPENCV_CUDA_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \\\n            template <> struct ForceGlob<base_type> \\\n            { \\\n                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \\\n                { \\\n                    asm(\"ld.global.\"#ptx_type\" %0, [%1];\" : \"=r\"(*reinterpret_cast<uint*>(&val)) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \\\n                } \\\n            };\n\n            OPENCV_CUDA_DEFINE_FORCE_GLOB_B(uchar,  u8)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB_B(schar,  s8)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB_B(char,   b8)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB  (ushort, u16, h)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB  (short,  s16, h)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB  (uint,   u32, r)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB  (int,    s32, r)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB  (float,  f32, f)\n            OPENCV_CUDA_DEFINE_FORCE_GLOB  (double, f64, d)\n\n        #undef OPENCV_CUDA_DEFINE_FORCE_GLOB\n        #undef OPENCV_CUDA_DEFINE_FORCE_GLOB_B\n        #undef OPENCV_CUDA_ASM_PTR\n\n    #endif // __CUDA_ARCH__ >= 200\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_DATAMOV_UTILS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/detail/color_detail.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_COLOR_DETAIL_HPP__\n#define __OPENCV_CUDA_COLOR_DETAIL_HPP__\n\n#include \"../common.hpp\"\n#include \"../vec_traits.hpp\"\n#include \"../saturate_cast.hpp\"\n#include \"../limits.hpp\"\n#include \"../functional.hpp\"\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    #ifndef CV_DESCALE\n        #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))\n    #endif\n\n    namespace color_detail\n    {\n        template<typename T> struct ColorChannel\n        {\n            typedef float worktype_f;\n            static __device__ __forceinline__ T max() { return numeric_limits<T>::max(); }\n            static __device__ __forceinline__ T half() { return (T)(max()/2 + 1); }\n        };\n\n        template<> struct ColorChannel<float>\n        {\n            typedef float worktype_f;\n            static __device__ __forceinline__ float max() { return 1.f; }\n            static __device__ __forceinline__ float half() { return 0.5f; }\n        };\n\n        template <typename T> static __device__ __forceinline__ void setAlpha(typename TypeVec<T, 3>::vec_type& vec, T val)\n        {\n        }\n\n        template <typename T> static __device__ __forceinline__ void setAlpha(typename TypeVec<T, 4>::vec_type& vec, T val)\n        {\n            vec.w = val;\n        }\n\n        template <typename T> static __device__ __forceinline__ T getAlpha(const typename TypeVec<T, 3>::vec_type& vec)\n        {\n            return ColorChannel<T>::max();\n        }\n\n        template <typename T> static __device__ __forceinline__ T getAlpha(const typename TypeVec<T, 4>::vec_type& vec)\n        {\n            return vec.w;\n        }\n\n        enum\n        {\n            yuv_shift  = 14,\n            xyz_shift  = 12,\n            R2Y        = 4899,\n            G2Y        = 9617,\n            B2Y        = 1868,\n            BLOCK_SIZE = 256\n        };\n    }\n\n////////////////// Various 3/4-channel to 3/4-channel RGB transformations /////////////////\n\n    namespace color_detail\n    {\n        template <typename T, int scn, int dcn, int bidx> struct RGB2RGB\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                dst.x = (&src.x)[bidx];\n                dst.y = src.y;\n                dst.z = (&src.x)[bidx^2];\n                setAlpha(dst, getAlpha<T>(src));\n\n                return dst;\n            }\n\n            __host__ __device__ __forceinline__ RGB2RGB() {}\n            __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {}\n        };\n\n        template <> struct RGB2RGB<uchar, 4, 4, 2> : unary_function<uint, uint>\n        {\n            __device__ uint operator()(uint src) const\n            {\n                uint dst = 0;\n\n                dst |= (0xffu & (src >> 16));\n                dst |= (0xffu & (src >> 8)) << 8;\n                dst |= (0xffu & (src)) << 16;\n                dst |= (0xffu & (src >> 24)) << 24;\n\n                return dst;\n            }\n\n            __host__ __device__ __forceinline__ RGB2RGB() {}\n            __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2RGB<T, scn, dcn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    
};\n\n/////////// Transforming 16-bit (565 or 555) RGB to/from 24/32-bit (888[8]) RGB //////////\n\n    namespace color_detail\n    {\n        template <int green_bits, int bidx> struct RGB2RGB5x5Converter;\n        template<int bidx> struct RGB2RGB5x5Converter<6, bidx>\n        {\n            static __device__ __forceinline__ ushort cvt(const uchar3& src)\n            {\n                return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~3) << 3) | (((&src.x)[bidx^2] & ~7) << 8));\n            }\n\n            static __device__ __forceinline__ ushort cvt(uint src)\n            {\n                uint b = 0xffu & (src >> (bidx * 8));\n                uint g = 0xffu & (src >> 8);\n                uint r = 0xffu & (src >> ((bidx ^ 2) * 8));\n                return (ushort)((b >> 3) | ((g & ~3) << 3) | ((r & ~7) << 8));\n            }\n        };\n\n        template<int bidx> struct RGB2RGB5x5Converter<5, bidx>\n        {\n            static __device__ __forceinline__ ushort cvt(const uchar3& src)\n            {\n                return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~7) << 2) | (((&src.x)[bidx^2] & ~7) << 7));\n            }\n\n            static __device__ __forceinline__ ushort cvt(uint src)\n            {\n                uint b = 0xffu & (src >> (bidx * 8));\n                uint g = 0xffu & (src >> 8);\n                uint r = 0xffu & (src >> ((bidx ^ 2) * 8));\n                uint a = 0xffu & (src >> 24);\n                return (ushort)((b >> 3) | ((g & ~7) << 2) | ((r & ~7) << 7) | (a * 0x8000));\n            }\n        };\n\n        template<int scn, int bidx, int green_bits> struct RGB2RGB5x5;\n\n        template<int bidx, int green_bits> struct RGB2RGB5x5<3, bidx,green_bits> : unary_function<uchar3, ushort>\n        {\n            __device__ __forceinline__ ushort operator()(const uchar3& src) const\n            {\n                return RGB2RGB5x5Converter<green_bits, bidx>::cvt(src);\n            }\n\n            __host__ __device__ __forceinline__ RGB2RGB5x5() {}\n            __host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {}\n        };\n\n        template<int bidx, int green_bits> struct RGB2RGB5x5<4, bidx,green_bits> : unary_function<uint, ushort>\n        {\n            __device__ __forceinline__ ushort operator()(uint src) const\n            {\n                return RGB2RGB5x5Converter<green_bits, bidx>::cvt(src);\n            }\n\n            __host__ __device__ __forceinline__ RGB2RGB5x5() {}\n            __host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \\\n    struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2RGB5x5<scn, bidx, green_bits> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        template <int green_bits, int bidx> struct RGB5x52RGBConverter;\n\n        template <int bidx> struct RGB5x52RGBConverter<5, bidx>\n        {\n            static __device__ __forceinline__ void cvt(uint src, uchar3& dst)\n            {\n                (&dst.x)[bidx] = src << 3;\n                dst.y = (src >> 2) & ~7;\n                (&dst.x)[bidx ^ 2] = (src >> 7) & ~7;\n            }\n\n            static __device__ __forceinline__ void cvt(uint src, uint& dst)\n            {\n                dst = 0;\n\n                dst 
|= (0xffu & (src << 3)) << (bidx * 8);\n                dst |= (0xffu & ((src >> 2) & ~7)) << 8;\n                dst |= (0xffu & ((src >> 7) & ~7)) << ((bidx ^ 2) * 8);\n                dst |= ((src & 0x8000) * 0xffu) << 24;\n            }\n        };\n\n        template <int bidx> struct RGB5x52RGBConverter<6, bidx>\n        {\n            static __device__ __forceinline__ void cvt(uint src, uchar3& dst)\n            {\n                (&dst.x)[bidx] = src << 3;\n                dst.y = (src >> 3) & ~3;\n                (&dst.x)[bidx ^ 2] = (src >> 8) & ~7;\n            }\n\n            static __device__ __forceinline__ void cvt(uint src, uint& dst)\n            {\n                dst = 0xffu << 24;\n\n                dst |= (0xffu & (src << 3)) << (bidx * 8);\n                dst |= (0xffu &((src >> 3) & ~3)) << 8;\n                dst |= (0xffu & ((src >> 8) & ~7)) << ((bidx ^ 2) * 8);\n            }\n        };\n\n        template <int dcn, int bidx, int green_bits> struct RGB5x52RGB;\n\n        template <int bidx, int green_bits> struct RGB5x52RGB<3, bidx, green_bits> : unary_function<ushort, uchar3>\n        {\n            __device__ __forceinline__ uchar3 operator()(ushort src) const\n            {\n                uchar3 dst;\n                RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB5x52RGB() {}\n            __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {}\n\n        };\n\n        template <int bidx, int green_bits> struct RGB5x52RGB<4, bidx, green_bits> : unary_function<ushort, uint>\n        {\n            __device__ __forceinline__ uint operator()(ushort src) const\n            {\n                uint dst;\n                RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB5x52RGB() {}\n            __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \\\n    struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB5x52RGB<dcn, bidx, green_bits> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n///////////////////////////////// Grayscale to Color ////////////////////////////////\n\n    namespace color_detail\n    {\n        template <typename T, int dcn> struct Gray2RGB : unary_function<T, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(T src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                dst.z = dst.y = dst.x = src;\n                setAlpha(dst, ColorChannel<T>::max());\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ Gray2RGB() {}\n            __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {}\n        };\n\n        template <> struct Gray2RGB<uchar, 4> : unary_function<uchar, uint>\n        {\n            __device__ __forceinline__ uint operator()(uint src) const\n            {\n                uint dst = 0xffu << 24;\n\n                dst |= src;\n                dst |= src << 8;\n                dst |= src << 16;\n\n                return dst;\n            }\n            __host__ 
__device__ __forceinline__ Gray2RGB() {}\n            __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::Gray2RGB<T, dcn> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        template <int green_bits> struct Gray2RGB5x5Converter;\n        template<> struct Gray2RGB5x5Converter<6>\n        {\n            static __device__ __forceinline__ ushort cvt(uint t)\n            {\n                return (ushort)((t >> 3) | ((t & ~3) << 3) | ((t & ~7) << 8));\n            }\n        };\n\n        template<> struct Gray2RGB5x5Converter<5>\n        {\n            static __device__ __forceinline__ ushort cvt(uint t)\n            {\n                t >>= 3;\n                return (ushort)(t | (t << 5) | (t << 10));\n            }\n        };\n\n        template<int green_bits> struct Gray2RGB5x5 : unary_function<uchar, ushort>\n        {\n            __device__ __forceinline__ ushort operator()(uint src) const\n            {\n                return Gray2RGB5x5Converter<green_bits>::cvt(src);\n            }\n\n            __host__ __device__ __forceinline__ Gray2RGB5x5() {}\n            __host__ __device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \\\n    struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::Gray2RGB5x5<green_bits> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n///////////////////////////////// Color to Grayscale ////////////////////////////////\n\n    namespace color_detail\n    {\n        template <int green_bits> struct RGB5x52GrayConverter;\n        template <> struct RGB5x52GrayConverter<6>\n        {\n            static __device__ __forceinline__ uchar cvt(uint t)\n            {\n                return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 3) & 0xfc) * G2Y + ((t >> 8) & 0xf8) * R2Y, yuv_shift);\n            }\n        };\n\n        template <> struct RGB5x52GrayConverter<5>\n        {\n            static __device__ __forceinline__ uchar cvt(uint t)\n            {\n                return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 2) & 0xf8) * G2Y + ((t >> 7) & 0xf8) * R2Y, yuv_shift);\n            }\n        };\n\n        template<int green_bits> struct RGB5x52Gray : unary_function<ushort, uchar>\n        {\n            __device__ __forceinline__ uchar operator()(uint src) const\n            {\n                return RGB5x52GrayConverter<green_bits>::cvt(src);\n            }\n            __host__ __device__ __forceinline__ RGB5x52Gray() {}\n            __host__ __device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \\\n    struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB5x52Gray<green_bits> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n 
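   // The converters below compute BT.601 luma, Y = 0.299 R + 0.587 G + 0.114 B: the integer\n    // overloads use the pre-scaled fixed-point coefficients R2Y/G2Y/B2Y with CV_DESCALE\n    // rounding at yuv_shift, and the float overload applies the same weights directly.\n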
   {\n        template <int bidx, typename T> static __device__ __forceinline__ T RGB2GrayConvert(const T* src)\n        {\n            return (T)CV_DESCALE((unsigned)(src[bidx] * B2Y + src[1] * G2Y + src[bidx^2] * R2Y), yuv_shift);\n        }\n\n        template <int bidx> static __device__ __forceinline__ uchar RGB2GrayConvert(uint src)\n        {\n            uint b = 0xffu & (src >> (bidx * 8));\n            uint g = 0xffu & (src >> 8);\n            uint r = 0xffu & (src >> ((bidx ^ 2) * 8));\n            return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift);\n        }\n\n        template <int bidx> static __device__ __forceinline__ float RGB2GrayConvert(const float* src)\n        {\n            return src[bidx] * 0.114f + src[1] * 0.587f + src[bidx^2] * 0.299f;\n        }\n\n        template <typename T, int scn, int bidx> struct RGB2Gray : unary_function<typename TypeVec<T, scn>::vec_type, T>\n        {\n            __device__ __forceinline__ T operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                return RGB2GrayConvert<bidx>(&src.x);\n            }\n            __host__ __device__ __forceinline__ RGB2Gray() {}\n            __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {}\n        };\n\n        template <int bidx> struct RGB2Gray<uchar, 4, bidx> : unary_function<uint, uchar>\n        {\n            __device__ __forceinline__ uchar operator()(uint src) const\n            {\n                return RGB2GrayConvert<bidx>(src);\n            }\n            __host__ __device__ __forceinline__ RGB2Gray() {}\n            __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2Gray<T, scn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n///////////////////////////////////// RGB <-> YUV //////////////////////////////////////\n\n    namespace color_detail\n    {\n        __constant__ float c_RGB2YUVCoeffs_f[5] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f };\n        __constant__ int   c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, 8061, 14369 };\n\n        template <int bidx, typename T, typename D> static __device__ void RGB2YUVConvert(const T* src, D& dst)\n        {\n            const int delta = ColorChannel<T>::half() * (1 << yuv_shift);\n\n            const int Y = CV_DESCALE(src[0] * c_RGB2YUVCoeffs_i[bidx^2] + src[1] * c_RGB2YUVCoeffs_i[1] + src[2] * c_RGB2YUVCoeffs_i[bidx], yuv_shift);\n            const int Cr = CV_DESCALE((src[bidx^2] - Y) * c_RGB2YUVCoeffs_i[3] + delta, yuv_shift);\n            const int Cb = CV_DESCALE((src[bidx] - Y) * c_RGB2YUVCoeffs_i[4] + delta, yuv_shift);\n\n            dst.x = saturate_cast<T>(Y);\n            dst.y = saturate_cast<T>(Cr);\n            dst.z = saturate_cast<T>(Cb);\n        }\n\n        template <int bidx, typename D> static __device__ __forceinline__ void RGB2YUVConvert(const float* src, D& dst)\n        {\n            dst.x = src[0] * c_RGB2YUVCoeffs_f[bidx^2] + src[1] * c_RGB2YUVCoeffs_f[1] + src[2] * c_RGB2YUVCoeffs_f[bidx];\n            dst.y = (src[bidx^2] - dst.x) * c_RGB2YUVCoeffs_f[3] + ColorChannel<float>::half();\n            dst.z = (src[bidx] - dst.x) * c_RGB2YUVCoeffs_f[4] + ColorChannel<float>::half();\n        }\n\n     
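   // c_RGB2YUVCoeffs_i stores the float coefficients in Q14 fixed point (scaled by\n        // 2^yuv_shift = 16384): e.g. round(0.492 * 16384) = 8061 and round(0.877 * 16384) = 14369.\n        // These implement the classic analog-YUV chroma U = 0.492 (B - Y), V = 0.877 (R - Y)\n        // on top of BT.601 luma, with delta re-centring the chroma at half the channel range.\n     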
   template <typename T, int scn, int dcn, int bidx> struct RGB2YUV\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n                RGB2YUVConvert<bidx>(&src.x, dst);\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2YUV() {}\n            __host__ __device__ __forceinline__ RGB2YUV(const RGB2YUV&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2YUV<T, scn, dcn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        __constant__ float c_YUV2RGBCoeffs_f[5] = { 2.032f, -0.395f, -0.581f, 1.140f };\n        __constant__ int   c_YUV2RGBCoeffs_i[5] = { 33292, -6472, -9519, 18678 };\n\n        template <int bidx, typename T, typename D> static __device__ void YUV2RGBConvert(const T& src, D* dst)\n        {\n            const int b = src.x + CV_DESCALE((src.z - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift);\n\n            const int g = src.x + CV_DESCALE((src.z - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[2]\n                                             + (src.y - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);\n\n            const int r = src.x + CV_DESCALE((src.y - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift);\n\n            dst[bidx] = saturate_cast<D>(b);\n            dst[1] = saturate_cast<D>(g);\n            dst[bidx^2] = saturate_cast<D>(r);\n        }\n\n        template <int bidx> static __device__ uint YUV2RGBConvert(uint src)\n        {\n            const int x = 0xff & (src);\n            const int y = 0xff & (src >> 8);\n            const int z = 0xff & (src >> 16);\n\n            const int b = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift);\n\n            const int g = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[2]\n                                         + (y - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);\n\n            const int r = x + CV_DESCALE((y - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift);\n\n            uint dst = 0xffu << 24;\n\n            dst |= saturate_cast<uchar>(b) << (bidx * 8);\n            dst |= saturate_cast<uchar>(g) << 8;\n            dst |= saturate_cast<uchar>(r) << ((bidx ^ 2) * 8);\n\n            return dst;\n        }\n\n        template <int bidx, typename T> static __device__ __forceinline__ void YUV2RGBConvert(const T& src, float* dst)\n        {\n            dst[bidx] = src.x + (src.z - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[3];\n\n            dst[1] = src.x + (src.z - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[2]\n                     + (src.y - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[1];\n\n            dst[bidx^2] = src.x + (src.y - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[0];\n        }\n\n        template <typename T, int scn, int dcn, int bidx> struct YUV2RGB\n            : unary_function<typename TypeVec<T, 
scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                YUV2RGBConvert<bidx>(src, &dst.x);\n                setAlpha(dst, ColorChannel<T>::max());\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ YUV2RGB() {}\n            __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {}\n        };\n\n        template <int bidx> struct YUV2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator ()(uint src) const\n            {\n                return YUV2RGBConvert<bidx>(src);\n            }\n            __host__ __device__ __forceinline__ YUV2RGB() {}\n            __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::YUV2RGB<T, scn, dcn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n///////////////////////////////////// RGB <-> YCrCb //////////////////////////////////////\n\n    namespace color_detail\n    {\n        __constant__ float c_RGB2YCrCbCoeffs_f[5] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f};\n        __constant__ int   c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, 11682, 9241};\n\n        template <int bidx, typename T, typename D> static __device__ void RGB2YCrCbConvert(const T* src, D& dst)\n        {\n            const int delta = ColorChannel<T>::half() * (1 << yuv_shift);\n\n            const int Y = CV_DESCALE(src[0] * c_RGB2YCrCbCoeffs_i[bidx^2] + src[1] * c_RGB2YCrCbCoeffs_i[1] + src[2] * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift);\n            const int Cr = CV_DESCALE((src[bidx^2] - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift);\n            const int Cb = CV_DESCALE((src[bidx] - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift);\n\n            dst.x = saturate_cast<T>(Y);\n            dst.y = saturate_cast<T>(Cr);\n            dst.z = saturate_cast<T>(Cb);\n        }\n\n        template <int bidx> static __device__ uint RGB2YCrCbConvert(uint src)\n        {\n            const int delta = ColorChannel<uchar>::half() * (1 << yuv_shift);\n\n            const int Y = CV_DESCALE((0xffu & src) * c_RGB2YCrCbCoeffs_i[bidx^2] + (0xffu & (src >> 8)) * c_RGB2YCrCbCoeffs_i[1] + (0xffu & (src >> 16)) * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift);\n            const int Cr = CV_DESCALE(((0xffu & (src >> ((bidx ^ 2) * 8))) - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift);\n            const int Cb = CV_DESCALE(((0xffu & (src >> (bidx * 8))) - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift);\n\n            uint dst = 0;\n\n            dst |= saturate_cast<uchar>(Y);\n            dst |= saturate_cast<uchar>(Cr) << 8;\n            dst |= saturate_cast<uchar>(Cb) << 16;\n\n            return dst;\n        }\n\n        template <int bidx, typename D> static __device__ __forceinline__ void RGB2YCrCbConvert(const float* src, D& dst)\n        {\n            dst.x = src[0] * c_RGB2YCrCbCoeffs_f[bidx^2] + src[1] * c_RGB2YCrCbCoeffs_f[1] + src[2] * c_RGB2YCrCbCoeffs_f[bidx];\n            dst.y = (src[bidx^2] - dst.x) * 
c_RGB2YCrCbCoeffs_f[3] + ColorChannel<float>::half();\n            dst.z = (src[bidx] - dst.x) * c_RGB2YCrCbCoeffs_f[4] + ColorChannel<float>::half();\n        }\n\n        template <typename T, int scn, int dcn, int bidx> struct RGB2YCrCb\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n                RGB2YCrCbConvert<bidx>(&src.x, dst);\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2YCrCb() {}\n            __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {}\n        };\n\n        template <int bidx> struct RGB2YCrCb<uchar, 4, 4, bidx> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator ()(uint src) const\n            {\n                return RGB2YCrCbConvert<bidx>(src);\n            }\n\n            __host__ __device__ __forceinline__ RGB2YCrCb() {}\n            __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2YCrCb<T, scn, dcn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        __constant__ float c_YCrCb2RGBCoeffs_f[5] = {1.403f, -0.714f, -0.344f, 1.773f};\n        __constant__ int   c_YCrCb2RGBCoeffs_i[5] = {22987, -11698, -5636, 29049};\n\n        template <int bidx, typename T, typename D> static __device__ void YCrCb2RGBConvert(const T& src, D* dst)\n        {\n            const int b = src.x + CV_DESCALE((src.z - ColorChannel<D>::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift);\n            const int g = src.x + CV_DESCALE((src.z - ColorChannel<D>::half()) * c_YCrCb2RGBCoeffs_i[2] + (src.y - ColorChannel<D>::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift);\n            const int r = src.x + CV_DESCALE((src.y - ColorChannel<D>::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift);\n\n            dst[bidx] = saturate_cast<D>(b);\n            dst[1] = saturate_cast<D>(g);\n            dst[bidx^2] = saturate_cast<D>(r);\n        }\n\n        template <int bidx> static __device__ uint YCrCb2RGBConvert(uint src)\n        {\n            const int x = 0xff & (src);\n            const int y = 0xff & (src >> 8);\n            const int z = 0xff & (src >> 16);\n\n            const int b = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift);\n            const int g = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[2] + (y - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift);\n            const int r = x + CV_DESCALE((y - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift);\n\n            uint dst = 0xffu << 24;\n\n            dst |= saturate_cast<uchar>(b) << (bidx * 8);\n            dst |= saturate_cast<uchar>(g) << 8;\n            dst |= saturate_cast<uchar>(r) << ((bidx ^ 2) * 8);\n\n            return dst;\n        }\n\n        template <int bidx, typename T> __device__ __forceinline__ void YCrCb2RGBConvert(const T& src, float* dst)\n       
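// Float path of the inverse transform: R = Y + 1.403 (Cr - d), G = Y - 0.714 (Cr - d)\n        // - 0.344 (Cb - d), B = Y + 1.773 (Cb - d), with d = ColorChannel<float>::half();\n        // c_YCrCb2RGBCoeffs_i holds the same factors in Q14, e.g. round(1.403 * 16384) = 22987.\n       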
 {\n            dst[bidx] = src.x + (src.z - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[3];\n            dst[1] = src.x + (src.z - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[2] + (src.y - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[1];\n            dst[bidx^2] = src.x + (src.y - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[0];\n        }\n\n        template <typename T, int scn, int dcn, int bidx> struct YCrCb2RGB\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                YCrCb2RGBConvert<bidx>(src, &dst.x);\n                setAlpha(dst, ColorChannel<T>::max());\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ YCrCb2RGB() {}\n            __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {}\n        };\n\n        template <int bidx> struct YCrCb2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator ()(uint src) const\n            {\n                return YCrCb2RGBConvert<bidx>(src);\n            }\n            __host__ __device__ __forceinline__ YCrCb2RGB() {}\n            __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::YCrCb2RGB<T, scn, dcn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n////////////////////////////////////// RGB <-> XYZ ///////////////////////////////////////\n\n    namespace color_detail\n    {\n        __constant__ float c_RGB2XYZ_D65f[9] = { 0.412453f, 0.357580f, 0.180423f, 0.212671f, 0.715160f, 0.072169f, 0.019334f, 0.119193f, 0.950227f };\n        __constant__ int   c_RGB2XYZ_D65i[9] = { 1689, 1465, 739, 871, 2929, 296, 79, 488, 3892 };\n\n        template <int bidx, typename T, typename D> static __device__ __forceinline__ void RGB2XYZConvert(const T* src, D& dst)\n        {\n            dst.z = saturate_cast<T>(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[6] + src[1] * c_RGB2XYZ_D65i[7] + src[bidx] * c_RGB2XYZ_D65i[8], xyz_shift));\n            dst.x = saturate_cast<T>(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[0] + src[1] * c_RGB2XYZ_D65i[1] + src[bidx] * c_RGB2XYZ_D65i[2], xyz_shift));\n            dst.y = saturate_cast<T>(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[3] + src[1] * c_RGB2XYZ_D65i[4] + src[bidx] * c_RGB2XYZ_D65i[5], xyz_shift));\n        }\n\n        template <int bidx> static __device__ __forceinline__ uint RGB2XYZConvert(uint src)\n        {\n            const uint b = 0xffu & (src >> (bidx * 8));\n            const uint g = 0xffu & (src >> 8);\n            const uint r = 0xffu & (src >> ((bidx ^ 2) * 8));\n\n            const uint x = saturate_cast<uchar>(CV_DESCALE(r * c_RGB2XYZ_D65i[0] + g * c_RGB2XYZ_D65i[1] + b * c_RGB2XYZ_D65i[2], xyz_shift));\n            const uint y = saturate_cast<uchar>(CV_DESCALE(r * c_RGB2XYZ_D65i[3] + g * c_RGB2XYZ_D65i[4] + b * c_RGB2XYZ_D65i[5], xyz_shift));\n            const uint z = saturate_cast<uchar>(CV_DESCALE(r * 
c_RGB2XYZ_D65i[6] + g * c_RGB2XYZ_D65i[7] + b * c_RGB2XYZ_D65i[8], xyz_shift));\n\n            uint dst = 0;\n\n            dst |= x;\n            dst |= y << 8;\n            dst |= z << 16;\n\n            return dst;\n        }\n\n        template <int bidx, typename D> static __device__ __forceinline__ void RGB2XYZConvert(const float* src, D& dst)\n        {\n            dst.x = src[bidx^2] * c_RGB2XYZ_D65f[0] + src[1] * c_RGB2XYZ_D65f[1] + src[bidx] * c_RGB2XYZ_D65f[2];\n            dst.y = src[bidx^2] * c_RGB2XYZ_D65f[3] + src[1] * c_RGB2XYZ_D65f[4] + src[bidx] * c_RGB2XYZ_D65f[5];\n            dst.z = src[bidx^2] * c_RGB2XYZ_D65f[6] + src[1] * c_RGB2XYZ_D65f[7] + src[bidx] * c_RGB2XYZ_D65f[8];\n        }\n\n        template <typename T, int scn, int dcn, int bidx> struct RGB2XYZ\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                RGB2XYZConvert<bidx>(&src.x, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2XYZ() {}\n            __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {}\n        };\n\n        template <int bidx> struct RGB2XYZ<uchar, 4, 4, bidx> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator()(uint src) const\n            {\n                return RGB2XYZConvert<bidx>(src);\n            }\n            __host__ __device__ __forceinline__ RGB2XYZ() {}\n            __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2XYZ<T, scn, dcn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        __constant__ float c_XYZ2sRGB_D65f[9] = { 3.240479f, -1.53715f, -0.498535f, -0.969256f, 1.875991f, 0.041556f, 0.055648f, -0.204043f, 1.057311f };\n        __constant__ int   c_XYZ2sRGB_D65i[9] = { 13273, -6296, -2042, -3970, 7684, 170, 228, -836, 4331 };\n\n        template <int bidx, typename T, typename D> static __device__ __forceinline__ void XYZ2RGBConvert(const T& src, D* dst)\n        {\n            dst[bidx^2] = saturate_cast<D>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[0] + src.y * c_XYZ2sRGB_D65i[1] + src.z * c_XYZ2sRGB_D65i[2], xyz_shift));\n            dst[1]      = saturate_cast<D>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[3] + src.y * c_XYZ2sRGB_D65i[4] + src.z * c_XYZ2sRGB_D65i[5], xyz_shift));\n            dst[bidx]   = saturate_cast<D>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[6] + src.y * c_XYZ2sRGB_D65i[7] + src.z * c_XYZ2sRGB_D65i[8], xyz_shift));\n        }\n\n        template <int bidx> static __device__ __forceinline__ uint XYZ2RGBConvert(uint src)\n        {\n            const int x = 0xff & src;\n            const int y = 0xff & (src >> 8);\n            const int z = 0xff & (src >> 16);\n\n            const uint r = saturate_cast<uchar>(CV_DESCALE(x * c_XYZ2sRGB_D65i[0] + y * c_XYZ2sRGB_D65i[1] + z * c_XYZ2sRGB_D65i[2], xyz_shift));\n            const uint g = saturate_cast<uchar>(CV_DESCALE(x * 
c_XYZ2sRGB_D65i[3] + y * c_XYZ2sRGB_D65i[4] + z * c_XYZ2sRGB_D65i[5], xyz_shift));\n            const uint b = saturate_cast<uchar>(CV_DESCALE(x * c_XYZ2sRGB_D65i[6] + y * c_XYZ2sRGB_D65i[7] + z * c_XYZ2sRGB_D65i[8], xyz_shift));\n\n            uint dst = 0xffu << 24;\n\n            dst |= b << (bidx * 8);\n            dst |= g << 8;\n            dst |= r << ((bidx ^ 2) * 8);\n\n            return dst;\n        }\n\n        template <int bidx, typename T> static __device__ __forceinline__ void XYZ2RGBConvert(const T& src, float* dst)\n        {\n            dst[bidx^2] = src.x * c_XYZ2sRGB_D65f[0] + src.y * c_XYZ2sRGB_D65f[1] + src.z * c_XYZ2sRGB_D65f[2];\n            dst[1]      = src.x * c_XYZ2sRGB_D65f[3] + src.y * c_XYZ2sRGB_D65f[4] + src.z * c_XYZ2sRGB_D65f[5];\n            dst[bidx]   = src.x * c_XYZ2sRGB_D65f[6] + src.y * c_XYZ2sRGB_D65f[7] + src.z * c_XYZ2sRGB_D65f[8];\n        }\n\n        template <typename T, int scn, int dcn, int bidx> struct XYZ2RGB\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                XYZ2RGBConvert<bidx>(src, &dst.x);\n                setAlpha(dst, ColorChannel<T>::max());\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ XYZ2RGB() {}\n            __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {}\n        };\n\n        template <int bidx> struct XYZ2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator()(uint src) const\n            {\n                return XYZ2RGBConvert<bidx>(src);\n            }\n            __host__ __device__ __forceinline__ XYZ2RGB() {}\n            __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::XYZ2RGB<T, scn, dcn, bidx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n////////////////////////////////////// RGB <-> HSV ///////////////////////////////////////\n\n    namespace color_detail\n    {\n        __constant__ int c_HsvDivTable   [256] = {0, 1044480, 522240, 348160, 261120, 208896, 174080, 149211, 130560, 116053, 104448, 94953, 87040, 80345, 74606, 69632, 65280, 61440, 58027, 54973, 52224, 49737, 47476, 45412, 43520, 41779, 40172, 38684, 37303, 36017, 34816, 33693, 32640, 31651, 30720, 29842, 29013, 28229, 27486, 26782, 26112, 25475, 24869, 24290, 23738, 23211, 22706, 22223, 21760, 21316, 20890, 20480, 20086, 19707, 19342, 18991, 18651, 18324, 18008, 17703, 17408, 17123, 16846, 16579, 16320, 16069, 15825, 15589, 15360, 15137, 14921, 14711, 14507, 14308, 14115, 13926, 13743, 13565, 13391, 13221, 13056, 12895, 12738, 12584, 12434, 12288, 12145, 12006, 11869, 11736, 11605, 11478, 11353, 11231, 11111, 10995, 10880, 10768, 10658, 10550, 10445, 10341, 10240, 10141, 10043, 9947, 9854, 9761, 9671, 9582, 9495, 9410, 9326, 9243, 9162, 9082, 9004, 8927, 8852, 8777, 8704, 8632, 8561, 8492, 8423, 8356, 8290, 8224, 8160, 8097, 8034, 7973, 7913, 7853, 7795, 7737, 7680, 7624, 7569, 7514, 7461, 
7408, 7355, 7304, 7253, 7203, 7154, 7105, 7057, 7010, 6963, 6917, 6872, 6827, 6782, 6739, 6695, 6653, 6611, 6569, 6528, 6487, 6447, 6408, 6369, 6330, 6292, 6254, 6217, 6180, 6144, 6108, 6073, 6037, 6003, 5968, 5935, 5901, 5868, 5835, 5803, 5771, 5739, 5708, 5677, 5646, 5615, 5585, 5556, 5526, 5497, 5468, 5440, 5412, 5384, 5356, 5329, 5302, 5275, 5249, 5222, 5196, 5171, 5145, 5120, 5095, 5070, 5046, 5022, 4998, 4974, 4950, 4927, 4904, 4881, 4858, 4836, 4813, 4791, 4769, 4748, 4726, 4705, 4684, 4663, 4642, 4622, 4601, 4581, 4561, 4541, 4522, 4502, 4483, 4464, 4445, 4426, 4407, 4389, 4370, 4352, 4334, 4316, 4298, 4281, 4263, 4246, 4229, 4212, 4195, 4178, 4161, 4145, 4128, 4112, 4096};\n        __constant__ int c_HsvDivTable180[256] = {0, 122880, 61440, 40960, 30720, 24576, 20480, 17554, 15360, 13653, 12288, 11171, 10240, 9452, 8777, 8192, 7680, 7228, 6827, 6467, 6144, 5851, 5585, 5343, 5120, 4915, 4726, 4551, 4389, 4237, 4096, 3964, 3840, 3724, 3614, 3511, 3413, 3321, 3234, 3151, 3072, 2997, 2926, 2858, 2793, 2731, 2671, 2614, 2560, 2508, 2458, 2409, 2363, 2318, 2276, 2234, 2194, 2156, 2119, 2083, 2048, 2014, 1982, 1950, 1920, 1890, 1862, 1834, 1807, 1781, 1755, 1731, 1707, 1683, 1661, 1638, 1617, 1596, 1575, 1555, 1536, 1517, 1499, 1480, 1463, 1446, 1429, 1412, 1396, 1381, 1365, 1350, 1336, 1321, 1307, 1293, 1280, 1267, 1254, 1241, 1229, 1217, 1205, 1193, 1182, 1170, 1159, 1148, 1138, 1127, 1117, 1107, 1097, 1087, 1078, 1069, 1059, 1050, 1041, 1033, 1024, 1016, 1007, 999, 991, 983, 975, 968, 960, 953, 945, 938, 931, 924, 917, 910, 904, 897, 890, 884, 878, 871, 865, 859, 853, 847, 842, 836, 830, 825, 819, 814, 808, 803, 798, 793, 788, 783, 778, 773, 768, 763, 759, 754, 749, 745, 740, 736, 731, 727, 723, 719, 714, 710, 706, 702, 698, 694, 690, 686, 683, 679, 675, 671, 668, 664, 661, 657, 654, 650, 647, 643, 640, 637, 633, 630, 627, 624, 621, 617, 614, 611, 608, 605, 602, 599, 597, 594, 591, 588, 585, 582, 580, 577, 574, 572, 569, 566, 564, 561, 559, 556, 554, 551, 549, 546, 544, 541, 539, 537, 534, 532, 530, 527, 525, 523, 521, 518, 516, 514, 512, 510, 508, 506, 504, 502, 500, 497, 495, 493, 492, 490, 488, 486, 484, 482};\n        __constant__ int c_HsvDivTable256[256] = {0, 174763, 87381, 58254, 43691, 34953, 29127, 24966, 21845, 19418, 17476, 15888, 14564, 13443, 12483, 11651, 10923, 10280, 9709, 9198, 8738, 8322, 7944, 7598, 7282, 6991, 6722, 6473, 6242, 6026, 5825, 5638, 5461, 5296, 5140, 4993, 4855, 4723, 4599, 4481, 4369, 4263, 4161, 4064, 3972, 3884, 3799, 3718, 3641, 3567, 3495, 3427, 3361, 3297, 3236, 3178, 3121, 3066, 3013, 2962, 2913, 2865, 2819, 2774, 2731, 2689, 2648, 2608, 2570, 2533, 2497, 2461, 2427, 2394, 2362, 2330, 2300, 2270, 2241, 2212, 2185, 2158, 2131, 2106, 2081, 2056, 2032, 2009, 1986, 1964, 1942, 1920, 1900, 1879, 1859, 1840, 1820, 1802, 1783, 1765, 1748, 1730, 1713, 1697, 1680, 1664, 1649, 1633, 1618, 1603, 1589, 1574, 1560, 1547, 1533, 1520, 1507, 1494, 1481, 1469, 1456, 1444, 1432, 1421, 1409, 1398, 1387, 1376, 1365, 1355, 1344, 1334, 1324, 1314, 1304, 1295, 1285, 1276, 1266, 1257, 1248, 1239, 1231, 1222, 1214, 1205, 1197, 1189, 1181, 1173, 1165, 1157, 1150, 1142, 1135, 1128, 1120, 1113, 1106, 1099, 1092, 1085, 1079, 1072, 1066, 1059, 1053, 1046, 1040, 1034, 1028, 1022, 1016, 1010, 1004, 999, 993, 987, 982, 976, 971, 966, 960, 955, 950, 945, 940, 935, 930, 925, 920, 915, 910, 906, 901, 896, 892, 887, 883, 878, 874, 869, 865, 861, 857, 853, 848, 844, 840, 836, 832, 828, 824, 820, 817, 813, 809, 805, 802, 798, 794, 791, 787, 784, 780, 777, 773, 770, 767, 763, 760, 
757, 753, 750, 747, 744, 741, 737, 734, 731, 728, 725, 722, 719, 716, 713, 710, 708, 705, 702, 699, 696, 694, 691, 688, 685};\n\n        template <int bidx, int hr, typename D> static __device__ void RGB2HSVConvert(const uchar* src, D& dst)\n        {\n            const int hsv_shift = 12;\n            const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256;\n\n            int b = src[bidx], g = src[1], r = src[bidx^2];\n            int h, s, v = b;\n            int vmin = b, diff;\n            int vr, vg;\n\n            v = ::max(v, g);\n            v = ::max(v, r);\n            vmin = ::min(vmin, g);\n            vmin = ::min(vmin, r);\n\n            diff = v - vmin;\n            vr = (v == r) * -1;\n            vg = (v == g) * -1;\n\n            s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift;\n            h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff))));\n            h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift;\n            h += (h < 0) * hr;\n\n            dst.x = saturate_cast<uchar>(h);\n            dst.y = (uchar)s;\n            dst.z = (uchar)v;\n        }\n\n        template <int bidx, int hr> static __device__ uint RGB2HSVConvert(uint src)\n        {\n            const int hsv_shift = 12;\n            const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256;\n\n            const int b = 0xff & (src >> (bidx * 8));\n            const int g = 0xff & (src >> 8);\n            const int r = 0xff & (src >> ((bidx ^ 2) * 8));\n\n            int h, s, v = b;\n            int vmin = b, diff;\n            int vr, vg;\n\n            v = ::max(v, g);\n            v = ::max(v, r);\n            vmin = ::min(vmin, g);\n            vmin = ::min(vmin, r);\n\n            diff = v - vmin;\n            vr = (v == r) * -1;\n            vg = (v == g) * -1;\n\n            s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift;\n            h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff))));\n            h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift;\n            h += (h < 0) * hr;\n\n            uint dst = 0;\n\n            dst |= saturate_cast<uchar>(h);\n            dst |= (0xffu & s) << 8;\n            dst |= (0xffu & v) << 16;\n\n            return dst;\n        }\n\n        template <int bidx, int hr, typename D> static __device__ void RGB2HSVConvert(const float* src, D& dst)\n        {\n            const float hscale = hr * (1.f / 360.f);\n\n            float b = src[bidx], g = src[1], r = src[bidx^2];\n            float h, s, v;\n\n            float vmin, diff;\n\n            v = vmin = r;\n            v = fmax(v, g);\n            v = fmax(v, b);\n            vmin = fmin(vmin, g);\n            vmin = fmin(vmin, b);\n\n            diff = v - vmin;\n            s = diff / (float)(::fabs(v) + numeric_limits<float>::epsilon());\n            diff = (float)(60. 
/ (diff + numeric_limits<float>::epsilon()));\n\n            h  = (v == r) * (g - b) * diff;\n            h += (v != r && v == g) * ((b - r) * diff + 120.f);\n            h += (v != r && v != g) * ((r - g) * diff + 240.f);\n            h += (h < 0) * 360.f;\n\n            dst.x = h * hscale;\n            dst.y = s;\n            dst.z = v;\n        }\n\n        template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HSV\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                RGB2HSVConvert<bidx, hr>(&src.x, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2HSV() {}\n            __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {}\n        };\n\n        template <int bidx, int hr> struct RGB2HSV<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator()(uint src) const\n            {\n                return RGB2HSVConvert<bidx, hr>(src);\n            }\n            __host__ __device__ __forceinline__ RGB2HSV() {}\n            __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HSV<T, scn, dcn, bidx, 180> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <typename T> struct name ## _full_traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HSV<T, scn, dcn, bidx, 256> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _full_traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        __constant__ int c_HsvSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} };\n\n        template <int bidx, int hr, typename T> static __device__ void HSV2RGBConvert(const T& src, float* dst)\n        {\n            const float hscale = 6.f / hr;\n\n            float h = src.x, s = src.y, v = src.z;\n            float b = v, g = v, r = v;\n\n            if (s != 0)\n            {\n                h *= hscale;\n\n                if( h < 0 )\n                    do h += 6; while( h < 0 );\n                else if( h >= 6 )\n                    do h -= 6; while( h >= 6 );\n\n                int sector = __float2int_rd(h);\n                h -= 
sector;\n\n                if ( (unsigned)sector >= 6u )\n                {\n                    sector = 0;\n                    h = 0.f;\n                }\n\n                float tab[4];\n                tab[0] = v;\n                tab[1] = v * (1.f - s);\n                tab[2] = v * (1.f - s * h);\n                tab[3] = v * (1.f - s * (1.f - h));\n\n                b = tab[c_HsvSectorData[sector][0]];\n                g = tab[c_HsvSectorData[sector][1]];\n                r = tab[c_HsvSectorData[sector][2]];\n            }\n\n            dst[bidx] = b;\n            dst[1] = g;\n            dst[bidx^2] = r;\n        }\n\n        template <int bidx, int HR, typename T> static __device__ void HSV2RGBConvert(const T& src, uchar* dst)\n        {\n            float3 buf;\n\n            buf.x = src.x;\n            buf.y = src.y * (1.f / 255.f);\n            buf.z = src.z * (1.f / 255.f);\n\n            HSV2RGBConvert<bidx, HR>(buf, &buf.x);\n\n            dst[0] = saturate_cast<uchar>(buf.x * 255.f);\n            dst[1] = saturate_cast<uchar>(buf.y * 255.f);\n            dst[2] = saturate_cast<uchar>(buf.z * 255.f);\n        }\n\n        template <int bidx, int hr> static __device__ uint HSV2RGBConvert(uint src)\n        {\n            float3 buf;\n\n            buf.x = src & 0xff;\n            buf.y = ((src >> 8) & 0xff) * (1.f/255.f);\n            buf.z = ((src >> 16) & 0xff) * (1.f/255.f);\n\n            HSV2RGBConvert<bidx, hr>(buf, &buf.x);\n\n            uint dst = 0xffu << 24;\n\n            dst |= saturate_cast<uchar>(buf.x * 255.f);\n            dst |= saturate_cast<uchar>(buf.y * 255.f) << 8;\n            dst |= saturate_cast<uchar>(buf.z * 255.f) << 16;\n\n            return dst;\n        }\n\n        template <typename T, int scn, int dcn, int bidx, int hr> struct HSV2RGB\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                HSV2RGBConvert<bidx, hr>(src, &dst.x);\n                setAlpha(dst, ColorChannel<T>::max());\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ HSV2RGB() {}\n            __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {}\n        };\n\n        template <int bidx, int hr> struct HSV2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator()(uint src) const\n            {\n                return HSV2RGBConvert<bidx, hr>(src);\n            }\n            __host__ __device__ __forceinline__ HSV2RGB() {}\n            __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HSV2RGB<T, scn, dcn, bidx, 180> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <typename T> struct name ## _full_traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HSV2RGB<T, scn, dcn, bidx, 255> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n      
      return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _full_traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n/////////////////////////////////////// RGB <-> HLS ////////////////////////////////////////\n\n    namespace color_detail\n    {\n        template <int bidx, int hr, typename D> static __device__ void RGB2HLSConvert(const float* src, D& dst)\n        {\n            const float hscale = hr * (1.f / 360.f);\n\n            float b = src[bidx], g = src[1], r = src[bidx^2];\n            float h = 0.f, s = 0.f, l;\n            float vmin, vmax, diff;\n\n            vmax = vmin = r;\n            vmax = fmax(vmax, g);\n            vmax = fmax(vmax, b);\n            vmin = fmin(vmin, g);\n            vmin = fmin(vmin, b);\n\n            diff = vmax - vmin;\n            l = (vmax + vmin) * 0.5f;\n\n            if (diff > numeric_limits<float>::epsilon())\n            {\n                s = (l < 0.5f) * diff / (vmax + vmin);\n                s += (l >= 0.5f) * diff / (2.0f - vmax - vmin);\n\n                diff = 60.f / diff;\n\n                h  = (vmax == r) * (g - b) * diff;\n                h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f);\n                h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f);\n                h += (h < 0.f) * 360.f;\n            }\n\n            dst.x = h * hscale;\n            dst.y = l;\n            dst.z = s;\n        }\n\n        template <int bidx, int hr, typename D> static __device__ void RGB2HLSConvert(const uchar* src, D& dst)\n        {\n            float3 buf;\n\n            buf.x = src[0] * (1.f / 255.f);\n            buf.y = src[1] * (1.f / 255.f);\n            buf.z = src[2] * (1.f / 255.f);\n\n            RGB2HLSConvert<bidx, hr>(&buf.x, buf);\n\n            dst.x = saturate_cast<uchar>(buf.x);\n            dst.y = saturate_cast<uchar>(buf.y*255.f);\n            dst.z = saturate_cast<uchar>(buf.z*255.f);\n        }\n\n        template <int bidx, int hr> static __device__ uint RGB2HLSConvert(uint src)\n        {\n            float3 buf;\n\n            buf.x = (0xff & src) * (1.f / 255.f);\n            buf.y = (0xff & (src >> 8)) * (1.f / 255.f);\n            buf.z = (0xff & (src >> 16)) * (1.f / 255.f);\n\n            RGB2HLSConvert<bidx, hr>(&buf.x, buf);\n\n            uint dst = 0xffu << 24;\n\n            dst |= saturate_cast<uchar>(buf.x);\n            dst |= saturate_cast<uchar>(buf.y * 255.f) << 8;\n            dst |= saturate_cast<uchar>(buf.z * 255.f) << 16;\n\n            return dst;\n        }\n\n        template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HLS\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                
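// Hue is produced in [0, hr): degrees scaled by hr / 360, so hr = 180 fits a uchar\n                // (the *_full_traits variants below use 256); L = (vmax + vmin) / 2, and S switches\n                // between diff / (vmax + vmin) and diff / (2 - vmax - vmin) at L = 0.5.\n                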
RGB2HLSConvert<bidx, hr>(&src.x, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2HLS() {}\n            __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {}\n        };\n\n        template <int bidx, int hr> struct RGB2HLS<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator()(uint src) const\n            {\n                return RGB2HLSConvert<bidx, hr>(src);\n            }\n            __host__ __device__ __forceinline__ RGB2HLS() {}\n            __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HLS<T, scn, dcn, bidx, 180> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <typename T> struct name ## _full_traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HLS<T, scn, dcn, bidx, 256> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _full_traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        __constant__ int c_HlsSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} };\n\n        template <int bidx, int hr, typename T> static __device__ void HLS2RGBConvert(const T& src, float* dst)\n        {\n            const float hscale = 6.0f / hr;\n\n            float h = src.x, l = src.y, s = src.z;\n            float b = l, g = l, r = l;\n\n            if (s != 0)\n            {\n                float p2  = (l <= 0.5f) * l * (1 + s);\n                      p2 += (l > 0.5f) * (l + s - l * s);\n                float p1 = 2 * l - p2;\n\n                h *= hscale;\n\n                if( h < 0 )\n                    do h += 6; while( h < 0 );\n                else if( h >= 6 )\n                    do h -= 6; while( h >= 6 );\n\n                int sector;\n                sector = __float2int_rd(h);\n\n                h -= sector;\n\n                float tab[4];\n                tab[0] = p2;\n                tab[1] = p1;\n                tab[2] = p1 + (p2 - p1) * (1 - h);\n                tab[3] = p1 + (p2 - p1) * h;\n\n                b = tab[c_HlsSectorData[sector][0]];\n                g = tab[c_HlsSectorData[sector][1]];\n                r = tab[c_HlsSectorData[sector][2]];\n            }\n\n            dst[bidx] = b;\n            dst[1] = g;\n            dst[bidx^2] = r;\n        }\n\n        template <int bidx, int hr, typename T> static __device__ void HLS2RGBConvert(const T& src, uchar* dst)\n        
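// uchar overload: normalise L and S to [0, 1] (hue stays in [0, hr)), reuse the float\n        // converter above, then rescale the RGB result to [0, 255] with saturate_cast.\n        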
{\n            float3 buf;\n\n            buf.x = src.x;\n            buf.y = src.y * (1.f / 255.f);\n            buf.z = src.z * (1.f / 255.f);\n\n            HLS2RGBConvert<bidx, hr>(buf, &buf.x);\n\n            dst[0] = saturate_cast<uchar>(buf.x * 255.f);\n            dst[1] = saturate_cast<uchar>(buf.y * 255.f);\n            dst[2] = saturate_cast<uchar>(buf.z * 255.f);\n        }\n\n        template <int bidx, int hr> static __device__ uint HLS2RGBConvert(uint src)\n        {\n            float3 buf;\n\n            buf.x = 0xff & src;\n            buf.y = (0xff & (src >> 8)) * (1.f / 255.f);\n            buf.z = (0xff & (src >> 16)) * (1.f / 255.f);\n\n            HLS2RGBConvert<bidx, hr>(buf, &buf.x);\n\n            uint dst = 0xffu << 24;\n\n            dst |= saturate_cast<uchar>(buf.x * 255.f);\n            dst |= saturate_cast<uchar>(buf.y * 255.f) << 8;\n            dst |= saturate_cast<uchar>(buf.z * 255.f) << 16;\n\n            return dst;\n        }\n\n        template <typename T, int scn, int dcn, int bidx, int hr> struct HLS2RGB\n            : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\n            {\n                typename TypeVec<T, dcn>::vec_type dst;\n\n                HLS2RGBConvert<bidx, hr>(src, &dst.x);\n                setAlpha(dst, ColorChannel<T>::max());\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ HLS2RGB() {}\n            __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {}\n        };\n\n        template <int bidx, int hr> struct HLS2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\n        {\n            __device__ __forceinline__ uint operator()(uint src) const\n            {\n                return HLS2RGBConvert<bidx, hr>(src);\n            }\n            __host__ __device__ __forceinline__ HLS2RGB() {}\n            __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HLS2RGB<T, scn, dcn, bidx, 180> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <typename T> struct name ## _full_traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HLS2RGB<T, scn, dcn, bidx, 255> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    }; \\\n    template <> struct name ## _full_traits<float> \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n///////////////////////////////////// 
RGB <-> Lab /////////////////////////////////////\n\n    namespace color_detail\n    {\n        enum\n        {\n            LAB_CBRT_TAB_SIZE = 1024,\n            GAMMA_TAB_SIZE = 1024,\n            lab_shift = xyz_shift,\n            gamma_shift = 3,\n            lab_shift2 = (lab_shift + gamma_shift),\n            LAB_CBRT_TAB_SIZE_B = (256 * 3 / 2 * (1 << gamma_shift))\n        };\n\n        __constant__ ushort c_sRGBGammaTab_b[] = {0,1,1,2,2,3,4,4,5,6,6,7,8,8,9,10,11,11,12,13,14,15,16,17,19,20,21,22,24,25,26,28,29,31,33,34,36,38,40,41,43,45,47,49,51,54,56,58,60,63,65,68,70,73,75,78,81,83,86,89,92,95,98,101,105,108,111,115,118,121,125,129,132,136,140,144,147,151,155,160,164,168,172,176,181,185,190,194,199,204,209,213,218,223,228,233,239,244,249,255,260,265,271,277,282,288,294,300,306,312,318,324,331,337,343,350,356,363,370,376,383,390,397,404,411,418,426,433,440,448,455,463,471,478,486,494,502,510,518,527,535,543,552,560,569,578,586,595,604,613,622,631,641,650,659,669,678,688,698,707,717,727,737,747,757,768,778,788,799,809,820,831,842,852,863,875,886,897,908,920,931,943,954,966,978,990,1002,1014,1026,1038,1050,1063,1075,1088,1101,1113,1126,1139,1152,1165,1178,1192,1205,1218,1232,1245,1259,1273,1287,1301,1315,1329,1343,1357,1372,1386,1401,1415,1430,1445,1460,1475,1490,1505,1521,1536,1551,1567,1583,1598,1614,1630,1646,1662,1678,1695,1711,1728,1744,1761,1778,1794,1811,1828,1846,1863,1880,1897,1915,1933,1950,1968,1986,2004,2022,2040};\n\n        __device__ __forceinline__ int LabCbrt_b(int i)\n        {\n            float x = i * (1.f / (255.f * (1 << gamma_shift)));\n            return (1 << lab_shift2) * (x < 0.008856f ? x * 7.787f + 0.13793103448275862f : ::cbrtf(x));\n        }\n\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void RGB2LabConvert_b(const T& src, D& dst)\n        {\n            const int Lscale = (116 * 255 + 50) / 100;\n            const int Lshift = -((16 * 255 * (1 << lab_shift2) + 50) / 100);\n\n            int B = blueIdx == 0 ? src.x : src.z;\n            int G = src.y;\n            int R = blueIdx == 0 ? 
src.z : src.x;\n\n            if (srgb)\n            {\n                B = c_sRGBGammaTab_b[B];\n                G = c_sRGBGammaTab_b[G];\n                R = c_sRGBGammaTab_b[R];\n            }\n            else\n            {\n                B <<= 3;\n                G <<= 3;\n                R <<= 3;\n            }\n\n            int fX = LabCbrt_b(CV_DESCALE(B * 778 + G * 1541 + R * 1777, lab_shift));\n            int fY = LabCbrt_b(CV_DESCALE(B * 296 + G * 2929 + R * 871, lab_shift));\n            int fZ = LabCbrt_b(CV_DESCALE(B * 3575 + G * 448 + R * 73, lab_shift));\n\n            int L = CV_DESCALE(Lscale * fY + Lshift, lab_shift2);\n            int a = CV_DESCALE(500 * (fX - fY) + 128 * (1 << lab_shift2), lab_shift2);\n            int b = CV_DESCALE(200 * (fY - fZ) + 128 * (1 << lab_shift2), lab_shift2);\n\n            dst.x = saturate_cast<uchar>(L);\n            dst.y = saturate_cast<uchar>(a);\n            dst.z = saturate_cast<uchar>(b);\n        }\n\n        __device__ __forceinline__ float splineInterpolate(float x, const float* tab, int n)\n        {\n            int ix = ::min(::max(int(x), 0), n-1);\n            x -= ix;\n            tab += ix * 4;\n            return ((tab[3] * x + tab[2]) * x + tab[1]) * x + tab[0];\n        }\n\n        __constant__ float c_sRGBGammaTab[] = {0,7.55853e-05,0.,-7.51331e-13,7.55853e-05,7.55853e-05,-2.25399e-12,3.75665e-12,0.000151171,7.55853e-05,9.01597e-12,-6.99932e-12,0.000226756,7.55853e-05,-1.1982e-11,2.41277e-12,0.000302341,7.55853e-05,-4.74369e-12,1.19001e-11,0.000377927,7.55853e-05,3.09568e-11,-2.09095e-11,0.000453512,7.55853e-05,-3.17718e-11,1.35303e-11,0.000529097,7.55853e-05,8.81905e-12,-4.10782e-12,0.000604683,7.55853e-05,-3.50439e-12,2.90097e-12,0.000680268,7.55853e-05,5.19852e-12,-7.49607e-12,0.000755853,7.55853e-05,-1.72897e-11,2.70833e-11,0.000831439,7.55854e-05,6.39602e-11,-4.26295e-11,0.000907024,7.55854e-05,-6.39282e-11,2.70193e-11,0.000982609,7.55853e-05,1.71298e-11,-7.24017e-12,0.00105819,7.55853e-05,-4.59077e-12,1.94137e-12,0.00113378,7.55853e-05,1.23333e-12,-5.25291e-13,0.00120937,7.55853e-05,-3.42545e-13,1.59799e-13,0.00128495,7.55853e-05,1.36852e-13,-1.13904e-13,0.00136054,7.55853e-05,-2.04861e-13,2.95818e-13,0.00143612,7.55853e-05,6.82594e-13,-1.06937e-12,0.00151171,7.55853e-05,-2.52551e-12,3.98166e-12,0.00158729,7.55853e-05,9.41946e-12,-1.48573e-11,0.00166288,7.55853e-05,-3.51523e-11,5.54474e-11,0.00173846,7.55854e-05,1.3119e-10,-9.0517e-11,0.00181405,7.55854e-05,-1.40361e-10,7.37899e-11,0.00188963,7.55853e-05,8.10085e-11,-8.82272e-11,0.00196522,7.55852e-05,-1.83673e-10,1.62704e-10,0.0020408,7.55853e-05,3.04438e-10,-2.13341e-10,0.00211639,7.55853e-05,-3.35586e-10,2.25e-10,0.00219197,7.55853e-05,3.39414e-10,-2.20997e-10,0.00226756,7.55853e-05,-3.23576e-10,1.93326e-10,0.00234315,7.55853e-05,2.564e-10,-8.66446e-11,0.00241873,7.55855e-05,-3.53328e-12,-7.9578e-11,0.00249432,7.55853e-05,-2.42267e-10,1.72126e-10,0.0025699,7.55853e-05,2.74111e-10,-1.43265e-10,0.00264549,7.55854e-05,-1.55683e-10,-6.47292e-11,0.00272107,7.55849e-05,-3.4987e-10,8.67842e-10,0.00279666,7.55868e-05,2.25366e-09,-3.8723e-09,0.00287224,7.55797e-05,-9.36325e-09,1.5087e-08,0.00294783,7.56063e-05,3.58978e-08,-5.69415e-08,0.00302341,7.55072e-05,-1.34927e-07,2.13144e-07,0.003099,7.58768e-05,5.04507e-07,1.38713e-07,0.00317552,7.7302e-05,9.20646e-07,-1.55186e-07,0.00325359,7.86777e-05,4.55087e-07,4.26813e-08,0.00333276,7.97159e-05,5.83131e-07,-1.06495e-08,0.00341305,8.08502e-05,5.51182e-07,3.87467e-09,0.00349446,8.19642e-05,5.62806e-07,-1.92586e-10,0.0
0357698,8.30892e-05,5.62228e-07,1.0866e-09,0.00366063,8.4217e-05,5.65488e-07,5.02818e-10,0.00374542,8.53494e-05,5.66997e-07,8.60211e-10,0.00383133,8.6486e-05,5.69577e-07,7.13044e-10,0.00391839,8.76273e-05,5.71716e-07,4.78527e-10,0.00400659,8.87722e-05,5.73152e-07,1.09818e-09,0.00409594,8.99218e-05,5.76447e-07,2.50964e-10,0.00418644,9.10754e-05,5.772e-07,1.15762e-09,0.00427809,9.22333e-05,5.80672e-07,2.40865e-10,0.0043709,9.33954e-05,5.81395e-07,1.13854e-09,0.00446488,9.45616e-05,5.84811e-07,3.27267e-10,0.00456003,9.57322e-05,5.85792e-07,8.1197e-10,0.00465635,9.69062e-05,5.88228e-07,6.15823e-10,0.00475384,9.80845e-05,5.90076e-07,9.15747e-10,0.00485252,9.92674e-05,5.92823e-07,3.778e-10,0.00495238,0.000100454,5.93956e-07,8.32623e-10,0.00505343,0.000101645,5.96454e-07,4.82695e-10,0.00515567,0.000102839,5.97902e-07,9.61904e-10,0.00525911,0.000104038,6.00788e-07,3.26281e-10,0.00536375,0.00010524,6.01767e-07,9.926e-10,0.00546959,0.000106447,6.04745e-07,3.59933e-10,0.00557664,0.000107657,6.05824e-07,8.2728e-10,0.0056849,0.000108871,6.08306e-07,5.21898e-10,0.00579438,0.00011009,6.09872e-07,8.10492e-10,0.00590508,0.000111312,6.12303e-07,4.27046e-10,0.00601701,0.000112538,6.13585e-07,7.40878e-10,0.00613016,0.000113767,6.15807e-07,8.00469e-10,0.00624454,0.000115001,6.18209e-07,2.48178e-10,0.00636016,0.000116238,6.18953e-07,1.00073e-09,0.00647702,0.000117479,6.21955e-07,4.05654e-10,0.00659512,0.000118724,6.23172e-07,6.36192e-10,0.00671447,0.000119973,6.25081e-07,7.74927e-10,0.00683507,0.000121225,6.27406e-07,4.54975e-10,0.00695692,0.000122481,6.28771e-07,6.64841e-10,0.00708003,0.000123741,6.30765e-07,6.10972e-10,0.00720441,0.000125004,6.32598e-07,6.16543e-10,0.00733004,0.000126271,6.34448e-07,6.48204e-10,0.00745695,0.000127542,6.36392e-07,5.15835e-10,0.00758513,0.000128816,6.3794e-07,5.48103e-10,0.00771458,0.000130094,6.39584e-07,1.01706e-09,0.00784532,0.000131376,6.42635e-07,4.0283e-11,0.00797734,0.000132661,6.42756e-07,6.84471e-10,0.00811064,0.000133949,6.4481e-07,9.47144e-10,0.00824524,0.000135241,6.47651e-07,1.83472e-10,0.00838112,0.000136537,6.48201e-07,1.11296e-09,0.00851831,0.000137837,6.5154e-07,2.13163e-11,0.0086568,0.00013914,6.51604e-07,6.64462e-10,0.00879659,0.000140445,6.53598e-07,1.04613e-09,0.00893769,0.000141756,6.56736e-07,-1.92377e-10,0.0090801,0.000143069,6.56159e-07,1.58601e-09,0.00922383,0.000144386,6.60917e-07,-5.63754e-10,0.00936888,0.000145706,6.59226e-07,1.60033e-09,0.00951524,0.000147029,6.64027e-07,-2.49543e-10,0.00966294,0.000148356,6.63278e-07,1.26043e-09,0.00981196,0.000149687,6.67059e-07,-1.35572e-10,0.00996231,0.00015102,6.66653e-07,1.14458e-09,0.010114,0.000152357,6.70086e-07,2.13864e-10,0.010267,0.000153698,6.70728e-07,7.93856e-10,0.0104214,0.000155042,6.73109e-07,3.36077e-10,0.0105771,0.000156389,6.74118e-07,6.55765e-10,0.0107342,0.000157739,6.76085e-07,7.66211e-10,0.0108926,0.000159094,6.78384e-07,4.66116e-12,0.0110524,0.000160451,6.78398e-07,1.07775e-09,0.0112135,0.000161811,6.81631e-07,3.41023e-10,0.011376,0.000163175,6.82654e-07,3.5205e-10,0.0115398,0.000164541,6.8371e-07,1.04473e-09,0.0117051,0.000165912,6.86844e-07,1.25757e-10,0.0118717,0.000167286,6.87222e-07,3.14818e-10,0.0120396,0.000168661,6.88166e-07,1.40886e-09,0.012209,0.000170042,6.92393e-07,-3.62244e-10,0.0123797,0.000171425,6.91306e-07,9.71397e-10,0.0125518,0.000172811,6.9422e-07,2.02003e-10,0.0127253,0.0001742,6.94826e-07,1.01448e-09,0.0129002,0.000175593,6.97869e-07,3.96653e-10,0.0130765,0.00017699,6.99059e-07,1.92927e-10,0.0132542,0.000178388,6.99638e-07,6.94305e-10,0.0134333,0.00017979,7.01721e-07,7
.55108e-10,0.0136138,0.000181195,7.03986e-07,1.05918e-11,0.0137957,0.000182603,7.04018e-07,1.06513e-09,0.013979,0.000184015,7.07214e-07,3.85512e-10,0.0141637,0.00018543,7.0837e-07,1.86769e-10,0.0143499,0.000186848,7.0893e-07,7.30116e-10,0.0145374,0.000188268,7.11121e-07,6.17983e-10,0.0147264,0.000189692,7.12975e-07,5.23282e-10,0.0149168,0.000191119,7.14545e-07,8.28398e-11,0.0151087,0.000192549,7.14793e-07,1.0081e-09,0.0153019,0.000193981,7.17817e-07,5.41244e-10,0.0154966,0.000195418,7.19441e-07,-3.7907e-10,0.0156928,0.000196856,7.18304e-07,1.90641e-09,0.0158903,0.000198298,7.24023e-07,-7.27387e-10,0.0160893,0.000199744,7.21841e-07,1.00317e-09,0.0162898,0.000201191,7.24851e-07,4.39949e-10,0.0164917,0.000202642,7.2617e-07,9.6234e-10,0.0166951,0.000204097,7.29057e-07,-5.64019e-10,0.0168999,0.000205554,7.27365e-07,1.29374e-09,0.0171062,0.000207012,7.31247e-07,9.77025e-10,0.017314,0.000208478,7.34178e-07,-1.47651e-09,0.0175232,0.000209942,7.29748e-07,3.06636e-09,0.0177338,0.00021141,7.38947e-07,-1.47573e-09,0.017946,0.000212884,7.3452e-07,9.7386e-10,0.0181596,0.000214356,7.37442e-07,1.30562e-09,0.0183747,0.000215835,7.41358e-07,-6.08376e-10,0.0185913,0.000217315,7.39533e-07,1.12785e-09,0.0188093,0.000218798,7.42917e-07,-1.77711e-10,0.0190289,0.000220283,7.42384e-07,1.44562e-09,0.0192499,0.000221772,7.46721e-07,-1.68825e-11,0.0194724,0.000223266,7.4667e-07,4.84533e-10,0.0196964,0.000224761,7.48124e-07,-5.85298e-11,0.0199219,0.000226257,7.47948e-07,1.61217e-09,0.0201489,0.000227757,7.52785e-07,-8.02136e-10,0.0203775,0.00022926,7.50378e-07,1.59637e-09,0.0206075,0.000230766,7.55167e-07,4.47168e-12,0.020839,0.000232276,7.55181e-07,2.48387e-10,0.021072,0.000233787,7.55926e-07,8.6474e-10,0.0213066,0.000235302,7.5852e-07,1.78299e-11,0.0215426,0.000236819,7.58573e-07,9.26567e-10,0.0217802,0.000238339,7.61353e-07,1.34529e-12,0.0220193,0.000239862,7.61357e-07,9.30659e-10,0.0222599,0.000241387,7.64149e-07,1.34529e-12,0.0225021,0.000242915,7.64153e-07,9.26567e-10,0.0227458,0.000244447,7.66933e-07,1.76215e-11,0.022991,0.00024598,7.66986e-07,8.65536e-10,0.0232377,0.000247517,7.69582e-07,2.45677e-10,0.023486,0.000249057,7.70319e-07,1.44193e-11,0.0237358,0.000250598,7.70363e-07,1.55918e-09,0.0239872,0.000252143,7.7504e-07,-6.63173e-10,0.0242401,0.000253691,7.73051e-07,1.09357e-09,0.0244946,0.000255241,7.76331e-07,1.41919e-11,0.0247506,0.000256793,7.76374e-07,7.12248e-10,0.0250082,0.000258348,7.78511e-07,8.62049e-10,0.0252673,0.000259908,7.81097e-07,-4.35061e-10,0.025528,0.000261469,7.79792e-07,8.7825e-10,0.0257902,0.000263031,7.82426e-07,6.47181e-10,0.0260541,0.000264598,7.84368e-07,2.58448e-10,0.0263194,0.000266167,7.85143e-07,1.81558e-10,0.0265864,0.000267738,7.85688e-07,8.78041e-10,0.0268549,0.000269312,7.88322e-07,3.15102e-11,0.027125,0.000270889,7.88417e-07,8.58525e-10,0.0273967,0.000272468,7.90992e-07,2.59812e-10,0.02767,0.000274051,7.91772e-07,-3.5224e-11,0.0279448,0.000275634,7.91666e-07,1.74377e-09,0.0282212,0.000277223,7.96897e-07,-1.35196e-09,0.0284992,0.000278813,7.92841e-07,1.80141e-09,0.0287788,0.000280404,7.98246e-07,-2.65629e-10,0.0290601,0.000281999,7.97449e-07,1.12374e-09,0.0293428,0.000283598,8.0082e-07,-5.04106e-10,0.0296272,0.000285198,7.99308e-07,8.92764e-10,0.0299132,0.000286799,8.01986e-07,6.58379e-10,0.0302008,0.000288405,8.03961e-07,1.98971e-10,0.0304901,0.000290014,8.04558e-07,4.08382e-10,0.0307809,0.000291624,8.05783e-07,3.01839e-11,0.0310733,0.000293236,8.05874e-07,1.33343e-09,0.0313673,0.000294851,8.09874e-07,2.2419e-10,0.031663,0.000296472,8.10547e-07,-3.67606e-10,0.0319603,0.00029
8092,8.09444e-07,1.24624e-09,0.0322592,0.000299714,8.13182e-07,-8.92025e-10,0.0325597,0.000301338,8.10506e-07,2.32183e-09,0.0328619,0.000302966,8.17472e-07,-9.44719e-10,0.0331657,0.000304598,8.14638e-07,1.45703e-09,0.0334711,0.000306232,8.19009e-07,-1.15805e-09,0.0337781,0.000307866,8.15535e-07,3.17507e-09,0.0340868,0.000309507,8.2506e-07,-4.09161e-09,0.0343971,0.000311145,8.12785e-07,5.74079e-09,0.0347091,0.000312788,8.30007e-07,-3.97034e-09,0.0350227,0.000314436,8.18096e-07,2.68985e-09,0.035338,0.00031608,8.26166e-07,6.61676e-10,0.0356549,0.000317734,8.28151e-07,-1.61123e-09,0.0359734,0.000319386,8.23317e-07,2.05786e-09,0.0362936,0.000321038,8.29491e-07,8.30388e-10,0.0366155,0.0003227,8.31982e-07,-1.65424e-09,0.036939,0.000324359,8.27019e-07,2.06129e-09,0.0372642,0.000326019,8.33203e-07,8.59719e-10,0.0375911,0.000327688,8.35782e-07,-1.77488e-09,0.0379196,0.000329354,8.30458e-07,2.51464e-09,0.0382498,0.000331023,8.38002e-07,-8.33135e-10,0.0385817,0.000332696,8.35502e-07,8.17825e-10,0.0389152,0.00033437,8.37956e-07,1.28718e-09,0.0392504,0.00033605,8.41817e-07,-2.2413e-09,0.0395873,0.000337727,8.35093e-07,3.95265e-09,0.0399258,0.000339409,8.46951e-07,-2.39332e-09,0.0402661,0.000341095,8.39771e-07,1.89533e-09,0.040608,0.000342781,8.45457e-07,-1.46271e-09,0.0409517,0.000344467,8.41069e-07,3.95554e-09,0.041297,0.000346161,8.52936e-07,-3.18369e-09,0.041644,0.000347857,8.43385e-07,1.32873e-09,0.0419927,0.000349548,8.47371e-07,1.59402e-09,0.0423431,0.000351248,8.52153e-07,-2.54336e-10,0.0426952,0.000352951,8.5139e-07,-5.76676e-10,0.043049,0.000354652,8.4966e-07,2.56114e-09,0.0434045,0.000356359,8.57343e-07,-2.21744e-09,0.0437617,0.000358067,8.50691e-07,2.58344e-09,0.0441206,0.000359776,8.58441e-07,-6.65826e-10,0.0444813,0.000361491,8.56444e-07,7.99218e-11,0.0448436,0.000363204,8.56684e-07,3.46063e-10,0.0452077,0.000364919,8.57722e-07,2.26116e-09,0.0455734,0.000366641,8.64505e-07,-1.94005e-09,0.045941,0.000368364,8.58685e-07,1.77384e-09,0.0463102,0.000370087,8.64007e-07,-1.43005e-09,0.0466811,0.000371811,8.59717e-07,3.94634e-09,0.0470538,0.000373542,8.71556e-07,-3.17946e-09,0.0474282,0.000375276,8.62017e-07,1.32104e-09,0.0478043,0.000377003,8.6598e-07,1.62045e-09,0.0481822,0.00037874,8.70842e-07,-3.52297e-10,0.0485618,0.000380481,8.69785e-07,-2.11211e-10,0.0489432,0.00038222,8.69151e-07,1.19716e-09,0.0493263,0.000383962,8.72743e-07,-8.52026e-10,0.0497111,0.000385705,8.70187e-07,2.21092e-09,0.0500977,0.000387452,8.76819e-07,-5.41339e-10,0.050486,0.000389204,8.75195e-07,-4.5361e-11,0.0508761,0.000390954,8.75059e-07,7.22669e-10,0.0512679,0.000392706,8.77227e-07,8.79936e-10,0.0516615,0.000394463,8.79867e-07,-5.17048e-10,0.0520568,0.000396222,8.78316e-07,1.18833e-09,0.0524539,0.000397982,8.81881e-07,-5.11022e-10,0.0528528,0.000399744,8.80348e-07,8.55683e-10,0.0532534,0.000401507,8.82915e-07,8.13562e-10,0.0536558,0.000403276,8.85356e-07,-3.84603e-10,0.05406,0.000405045,8.84202e-07,7.24962e-10,0.0544659,0.000406816,8.86377e-07,1.20986e-09,0.0548736,0.000408592,8.90006e-07,-1.83896e-09,0.0552831,0.000410367,8.84489e-07,2.42071e-09,0.0556944,0.000412143,8.91751e-07,-3.93413e-10,0.0561074,0.000413925,8.90571e-07,-8.46967e-10,0.0565222,0.000415704,8.8803e-07,3.78122e-09,0.0569388,0.000417491,8.99374e-07,-3.1021e-09,0.0573572,0.000419281,8.90068e-07,1.17658e-09,0.0577774,0.000421064,8.93597e-07,2.12117e-09,0.0581993,0.000422858,8.99961e-07,-2.21068e-09,0.0586231,0.000424651,8.93329e-07,2.9961e-09,0.0590486,0.000426447,9.02317e-07,-2.32311e-09,0.059476,0.000428244,8.95348e-07,2.57122e-09,0.0599051,0.000430043,9
.03062e-07,-5.11098e-10,0.0603361,0.000431847,9.01528e-07,-5.27166e-10,0.0607688,0.000433649,8.99947e-07,2.61984e-09,0.0612034,0.000435457,9.07806e-07,-2.50141e-09,0.0616397,0.000437265,9.00302e-07,3.66045e-09,0.0620779,0.000439076,9.11283e-07,-4.68977e-09,0.0625179,0.000440885,8.97214e-07,7.64783e-09,0.0629597,0.000442702,9.20158e-07,-7.27499e-09,0.0634033,0.000444521,8.98333e-07,6.55113e-09,0.0638487,0.000446337,9.17986e-07,-4.02844e-09,0.0642959,0.000448161,9.05901e-07,2.11196e-09,0.064745,0.000449979,9.12236e-07,3.03125e-09,0.0651959,0.000451813,9.2133e-07,-6.78648e-09,0.0656486,0.000453635,9.00971e-07,9.21375e-09,0.0661032,0.000455464,9.28612e-07,-7.71684e-09,0.0665596,0.000457299,9.05462e-07,6.7522e-09,0.0670178,0.00045913,9.25718e-07,-4.3907e-09,0.0674778,0.000460968,9.12546e-07,3.36e-09,0.0679397,0.000462803,9.22626e-07,-1.59876e-09,0.0684034,0.000464644,9.1783e-07,3.0351e-09,0.068869,0.000466488,9.26935e-07,-3.09101e-09,0.0693364,0.000468333,9.17662e-07,1.8785e-09,0.0698057,0.000470174,9.23298e-07,3.02733e-09,0.0702768,0.00047203,9.3238e-07,-6.53722e-09,0.0707497,0.000473875,9.12768e-07,8.22054e-09,0.0712245,0.000475725,9.37429e-07,-3.99325e-09,0.0717012,0.000477588,9.2545e-07,3.01839e-10,0.0721797,0.00047944,9.26355e-07,2.78597e-09,0.0726601,0.000481301,9.34713e-07,-3.99507e-09,0.0731423,0.000483158,9.22728e-07,5.7435e-09,0.0736264,0.000485021,9.39958e-07,-4.07776e-09,0.0741123,0.000486888,9.27725e-07,3.11695e-09,0.0746002,0.000488753,9.37076e-07,-9.39394e-10,0.0750898,0.000490625,9.34258e-07,6.4055e-10,0.0755814,0.000492495,9.3618e-07,-1.62265e-09,0.0760748,0.000494363,9.31312e-07,5.84995e-09,0.0765701,0.000496243,9.48861e-07,-6.87601e-09,0.0770673,0.00049812,9.28233e-07,6.75296e-09,0.0775664,0.000499997,9.48492e-07,-5.23467e-09,0.0780673,0.000501878,9.32788e-07,6.73523e-09,0.0785701,0.000503764,9.52994e-07,-6.80514e-09,0.0790748,0.000505649,9.32578e-07,5.5842e-09,0.0795814,0.000507531,9.49331e-07,-6.30583e-10,0.0800899,0.000509428,9.47439e-07,-3.0618e-09,0.0806003,0.000511314,9.38254e-07,5.4273e-09,0.0811125,0.000513206,9.54536e-07,-3.74627e-09,0.0816267,0.000515104,9.43297e-07,2.10713e-09,0.0821427,0.000516997,9.49618e-07,2.76839e-09,0.0826607,0.000518905,9.57924e-07,-5.73006e-09,0.0831805,0.000520803,9.40733e-07,5.25072e-09,0.0837023,0.0005227,9.56486e-07,-3.71718e-10,0.084226,0.000524612,9.5537e-07,-3.76404e-09,0.0847515,0.000526512,9.44078e-07,7.97735e-09,0.085279,0.000528424,9.6801e-07,-5.79367e-09,0.0858084,0.000530343,9.50629e-07,2.96268e-10,0.0863397,0.000532245,9.51518e-07,4.6086e-09,0.0868729,0.000534162,9.65344e-07,-3.82947e-09,0.087408,0.000536081,9.53856e-07,3.25861e-09,0.087945,0.000537998,9.63631e-07,-1.7543e-09,0.088484,0.00053992,9.58368e-07,3.75849e-09,0.0890249,0.000541848,9.69644e-07,-5.82891e-09,0.0895677,0.00054377,9.52157e-07,4.65593e-09,0.0901124,0.000545688,9.66125e-07,2.10643e-09,0.0906591,0.000547627,9.72444e-07,-5.63099e-09,0.0912077,0.000549555,9.55551e-07,5.51627e-09,0.0917582,0.000551483,9.721e-07,-1.53292e-09,0.0923106,0.000553422,9.67501e-07,6.15311e-10,0.092865,0.000555359,9.69347e-07,-9.28291e-10,0.0934213,0.000557295,9.66562e-07,3.09774e-09,0.0939796,0.000559237,9.75856e-07,-4.01186e-09,0.0945398,0.000561177,9.6382e-07,5.49892e-09,0.095102,0.000563121,9.80317e-07,-3.08258e-09,0.0956661,0.000565073,9.71069e-07,-6.19176e-10,0.0962321,0.000567013,9.69212e-07,5.55932e-09,0.0968001,0.000568968,9.8589e-07,-6.71704e-09,0.09737,0.00057092,9.65738e-07,6.40762e-09,0.0979419,0.00057287,9.84961e-07,-4.0122e-09,0.0985158,0.000574828,9.72925e-07,2.19059e-09
,0.0990916,0.000576781,9.79496e-07,2.70048e-09,0.0996693,0.000578748,9.87598e-07,-5.54193e-09,0.100249,0.000580706,9.70972e-07,4.56597e-09,0.100831,0.000582662,9.8467e-07,2.17923e-09,0.101414,0.000584638,9.91208e-07,-5.83232e-09,0.102,0.000586603,9.73711e-07,6.24884e-09,0.102588,0.000588569,9.92457e-07,-4.26178e-09,0.103177,0.000590541,9.79672e-07,3.34781e-09,0.103769,0.00059251,9.89715e-07,-1.67904e-09,0.104362,0.000594485,9.84678e-07,3.36839e-09,0.104958,0.000596464,9.94783e-07,-4.34397e-09,0.105555,0.000598441,9.81751e-07,6.55696e-09,0.106155,0.000600424,1.00142e-06,-6.98272e-09,0.106756,0.000602406,9.80474e-07,6.4728e-09,0.107359,0.000604386,9.99893e-07,-4.00742e-09,0.107965,0.000606374,9.8787e-07,2.10654e-09,0.108572,0.000608356,9.9419e-07,3.0318e-09,0.109181,0.000610353,1.00329e-06,-6.7832e-09,0.109793,0.00061234,9.82936e-07,9.1998e-09,0.110406,0.000614333,1.01054e-06,-7.6642e-09,0.111021,0.000616331,9.87543e-07,6.55579e-09,0.111639,0.000618326,1.00721e-06,-3.65791e-09,0.112258,0.000620329,9.96236e-07,6.25467e-10,0.112879,0.000622324,9.98113e-07,1.15593e-09,0.113503,0.000624323,1.00158e-06,2.20158e-09,0.114128,0.000626333,1.00819e-06,-2.51191e-09,0.114755,0.000628342,1.00065e-06,3.95517e-10,0.115385,0.000630345,1.00184e-06,9.29807e-10,0.116016,0.000632351,1.00463e-06,3.33599e-09,0.116649,0.00063437,1.01463e-06,-6.82329e-09,0.117285,0.000636379,9.94163e-07,9.05595e-09,0.117922,0.000638395,1.02133e-06,-7.04862e-09,0.118562,0.000640416,1.00019e-06,4.23737e-09,0.119203,0.000642429,1.0129e-06,-2.45033e-09,0.119847,0.000644448,1.00555e-06,5.56395e-09,0.120492,0.000646475,1.02224e-06,-4.9043e-09,0.121139,0.000648505,1.00753e-06,-8.47952e-10,0.121789,0.000650518,1.00498e-06,8.29622e-09,0.122441,0.000652553,1.02987e-06,-9.98538e-09,0.123094,0.000654582,9.99914e-07,9.2936e-09,0.12375,0.00065661,1.02779e-06,-4.83707e-09,0.124407,0.000658651,1.01328e-06,2.60411e-09,0.125067,0.000660685,1.0211e-06,-5.57945e-09,0.125729,0.000662711,1.00436e-06,1.22631e-08,0.126392,0.000664756,1.04115e-06,-1.36704e-08,0.127058,0.000666798,1.00014e-06,1.26161e-08,0.127726,0.000668836,1.03798e-06,-6.99155e-09,0.128396,0.000670891,1.01701e-06,4.48836e-10,0.129068,0.000672926,1.01836e-06,5.19606e-09,0.129742,0.000674978,1.03394e-06,-6.3319e-09,0.130418,0.000677027,1.01495e-06,5.2305e-09,0.131096,0.000679073,1.03064e-06,3.11123e-10,0.131776,0.000681135,1.03157e-06,-6.47511e-09,0.132458,0.000683179,1.01215e-06,1.06882e-08,0.133142,0.000685235,1.04421e-06,-6.47519e-09,0.133829,0.000687304,1.02479e-06,3.11237e-10,0.134517,0.000689355,1.02572e-06,5.23035e-09,0.135207,0.000691422,1.04141e-06,-6.3316e-09,0.1359,0.000693486,1.02242e-06,5.19484e-09,0.136594,0.000695546,1.038e-06,4.53497e-10,0.137291,0.000697623,1.03936e-06,-7.00891e-09,0.137989,0.000699681,1.01834e-06,1.2681e-08,0.13869,0.000701756,1.05638e-06,-1.39128e-08,0.139393,0.000703827,1.01464e-06,1.31679e-08,0.140098,0.000705896,1.05414e-06,-8.95659e-09,0.140805,0.000707977,1.02727e-06,7.75742e-09,0.141514,0.000710055,1.05055e-06,-7.17182e-09,0.142225,0.000712135,1.02903e-06,6.02862e-09,0.142938,0.000714211,1.04712e-06,-2.04163e-09,0.143653,0.000716299,1.04099e-06,2.13792e-09,0.144371,0.000718387,1.04741e-06,-6.51009e-09,0.14509,0.000720462,1.02787e-06,9.00123e-09,0.145812,0.000722545,1.05488e-06,3.07523e-10,0.146535,0.000724656,1.0558e-06,-1.02312e-08,0.147261,0.000726737,1.02511e-06,1.0815e-08,0.147989,0.000728819,1.05755e-06,-3.22681e-09,0.148719,0.000730925,1.04787e-06,2.09244e-09,0.14945,0.000733027,1.05415e-06,-5.143e-09,0.150185,0.00073512,1.03872e-06,3.57844e-09
,0.150921,0.000737208,1.04946e-06,5.73027e-09,0.151659,0.000739324,1.06665e-06,-1.15983e-08,0.152399,0.000741423,1.03185e-06,1.08605e-08,0.153142,0.000743519,1.06443e-06,-2.04106e-09,0.153886,0.000745642,1.05831e-06,-2.69642e-09,0.154633,0.00074775,1.05022e-06,-2.07425e-09,0.155382,0.000749844,1.044e-06,1.09934e-08,0.156133,0.000751965,1.07698e-06,-1.20972e-08,0.156886,0.000754083,1.04069e-06,7.59288e-09,0.157641,0.000756187,1.06347e-06,-3.37305e-09,0.158398,0.000758304,1.05335e-06,5.89921e-09,0.159158,0.000760428,1.07104e-06,-5.32248e-09,0.159919,0.000762554,1.05508e-06,4.8927e-10,0.160683,0.000764666,1.05654e-06,3.36547e-09,0.161448,0.000766789,1.06664e-06,9.50081e-10,0.162216,0.000768925,1.06949e-06,-7.16568e-09,0.162986,0.000771043,1.04799e-06,1.28114e-08,0.163758,0.000773177,1.08643e-06,-1.42774e-08,0.164533,0.000775307,1.0436e-06,1.44956e-08,0.165309,0.000777438,1.08708e-06,-1.39025e-08,0.166087,0.00077957,1.04538e-06,1.13118e-08,0.166868,0.000781695,1.07931e-06,-1.54224e-09,0.167651,0.000783849,1.07468e-06,-5.14312e-09,0.168436,0.000785983,1.05925e-06,7.21381e-09,0.169223,0.000788123,1.0809e-06,-8.81096e-09,0.170012,0.000790259,1.05446e-06,1.31289e-08,0.170803,0.000792407,1.09385e-06,-1.39022e-08,0.171597,0.000794553,1.05214e-06,1.26775e-08,0.172392,0.000796695,1.09018e-06,-7.00557e-09,0.17319,0.000798855,1.06916e-06,4.43796e-10,0.17399,0.000800994,1.07049e-06,5.23031e-09,0.174792,0.000803151,1.08618e-06,-6.46397e-09,0.175596,0.000805304,1.06679e-06,5.72444e-09,0.176403,0.000807455,1.08396e-06,-1.53254e-09,0.177211,0.000809618,1.07937e-06,4.05673e-10,0.178022,0.000811778,1.08058e-06,-9.01916e-11,0.178835,0.000813939,1.08031e-06,-4.49821e-11,0.17965,0.000816099,1.08018e-06,2.70234e-10,0.180467,0.00081826,1.08099e-06,-1.03603e-09,0.181286,0.000820419,1.07788e-06,3.87392e-09,0.182108,0.000822587,1.0895e-06,4.41522e-10,0.182932,0.000824767,1.09083e-06,-5.63997e-09,0.183758,0.000826932,1.07391e-06,7.21707e-09,0.184586,0.000829101,1.09556e-06,-8.32718e-09,0.185416,0.000831267,1.07058e-06,1.11907e-08,0.186248,0.000833442,1.10415e-06,-6.63336e-09,0.187083,0.00083563,1.08425e-06,4.41484e-10,0.187919,0.0008378,1.08557e-06,4.86754e-09,0.188758,0.000839986,1.10017e-06,-5.01041e-09,0.189599,0.000842171,1.08514e-06,2.72811e-10,0.190443,0.000844342,1.08596e-06,3.91916e-09,0.191288,0.000846526,1.09772e-06,-1.04819e-09,0.192136,0.000848718,1.09457e-06,2.73531e-10,0.192985,0.000850908,1.0954e-06,-4.58916e-11,0.193837,0.000853099,1.09526e-06,-9.01158e-11,0.194692,0.000855289,1.09499e-06,4.06506e-10,0.195548,0.00085748,1.09621e-06,-1.53595e-09,0.196407,0.000859668,1.0916e-06,5.73717e-09,0.197267,0.000861869,1.10881e-06,-6.51164e-09,0.19813,0.000864067,1.08928e-06,5.40831e-09,0.198995,0.000866261,1.1055e-06,-2.20401e-10,0.199863,0.000868472,1.10484e-06,-4.52652e-09,0.200732,0.000870668,1.09126e-06,3.42508e-09,0.201604,0.000872861,1.10153e-06,5.72762e-09,0.202478,0.000875081,1.11872e-06,-1.14344e-08,0.203354,0.000877284,1.08441e-06,1.02076e-08,0.204233,0.000879484,1.11504e-06,4.06355e-10,0.205113,0.000881715,1.11626e-06,-1.18329e-08,0.205996,0.000883912,1.08076e-06,1.71227e-08,0.206881,0.000886125,1.13213e-06,-1.19546e-08,0.207768,0.000888353,1.09626e-06,8.93465e-10,0.208658,0.000890548,1.09894e-06,8.38062e-09,0.209549,0.000892771,1.12408e-06,-4.61353e-09,0.210443,0.000895006,1.11024e-06,-4.82756e-09,0.211339,0.000897212,1.09576e-06,9.02245e-09,0.212238,0.00089943,1.12283e-06,-1.45997e-09,0.213138,0.000901672,1.11845e-06,-3.18255e-09,0.214041,0.000903899,1.1089e-06,-7.11073e-10,0.214946,0.000906115,1.1067
7e-06,6.02692e-09,0.215853,0.000908346,1.12485e-06,-8.49548e-09,0.216763,0.00091057,1.09936e-06,1.30537e-08,0.217675,0.000912808,1.13852e-06,-1.3917e-08,0.218588,0.000915044,1.09677e-06,1.28121e-08,0.219505,0.000917276,1.13521e-06,-7.5288e-09,0.220423,0.000919523,1.11262e-06,2.40205e-09,0.221344,0.000921756,1.11983e-06,-2.07941e-09,0.222267,0.000923989,1.11359e-06,5.91551e-09,0.223192,0.000926234,1.13134e-06,-6.68149e-09,0.224119,0.000928477,1.11129e-06,5.90929e-09,0.225049,0.000930717,1.12902e-06,-2.05436e-09,0.22598,0.000932969,1.12286e-06,2.30807e-09,0.226915,0.000935222,1.12978e-06,-7.17796e-09,0.227851,0.00093746,1.10825e-06,1.15028e-08,0.228789,0.000939711,1.14276e-06,-9.03083e-09,0.22973,0.000941969,1.11566e-06,9.71932e-09,0.230673,0.00094423,1.14482e-06,-1.49452e-08,0.231619,0.000946474,1.09998e-06,2.02591e-08,0.232566,0.000948735,1.16076e-06,-2.13879e-08,0.233516,0.000950993,1.0966e-06,2.05888e-08,0.234468,0.000953247,1.15837e-06,-1.62642e-08,0.235423,0.000955515,1.10957e-06,1.46658e-08,0.236379,0.000957779,1.15357e-06,-1.25966e-08,0.237338,0.000960048,1.11578e-06,5.91793e-09,0.238299,0.000962297,1.13353e-06,3.82602e-09,0.239263,0.000964576,1.14501e-06,-6.3208e-09,0.240229,0.000966847,1.12605e-06,6.55613e-09,0.241197,0.000969119,1.14572e-06,-5.00268e-09,0.242167,0.000971395,1.13071e-06,-1.44659e-09,0.243139,0.000973652,1.12637e-06,1.07891e-08,0.244114,0.000975937,1.15874e-06,-1.19073e-08,0.245091,0.000978219,1.12302e-06,7.03782e-09,0.246071,0.000980486,1.14413e-06,-1.34276e-09,0.247052,0.00098277,1.1401e-06,-1.66669e-09,0.248036,0.000985046,1.1351e-06,8.00935e-09,0.249022,0.00098734,1.15913e-06,-1.54694e-08,0.250011,0.000989612,1.11272e-06,2.4066e-08,0.251002,0.000991909,1.18492e-06,-2.11901e-08,0.251995,0.000994215,1.12135e-06,1.08973e-09,0.25299,0.000996461,1.12462e-06,1.68311e-08,0.253988,0.000998761,1.17511e-06,-8.8094e-09,0.254987,0.00100109,1.14868e-06,-1.13958e-08,0.25599,0.00100335,1.1145e-06,2.45902e-08,0.256994,0.00100565,1.18827e-06,-2.73603e-08,0.258001,0.00100795,1.10618e-06,2.52464e-08,0.25901,0.00101023,1.18192e-06,-1.40207e-08,0.260021,0.00101256,1.13986e-06,1.03387e-09,0.261035,0.00101484,1.14296e-06,9.8853e-09,0.262051,0.00101715,1.17262e-06,-1.07726e-08,0.263069,0.00101947,1.1403e-06,3.40272e-09,0.26409,0.00102176,1.15051e-06,-2.83827e-09,0.265113,0.00102405,1.142e-06,7.95039e-09,0.266138,0.00102636,1.16585e-06,8.39047e-10,0.267166,0.00102869,1.16836e-06,-1.13066e-08,0.268196,0.00103099,1.13444e-06,1.4585e-08,0.269228,0.00103331,1.1782e-06,-1.72314e-08,0.270262,0.00103561,1.1265e-06,2.45382e-08,0.271299,0.00103794,1.20012e-06,-2.13166e-08,0.272338,0.00104028,1.13617e-06,1.12364e-09,0.273379,0.00104255,1.13954e-06,1.68221e-08,0.274423,0.00104488,1.19001e-06,-8.80736e-09,0.275469,0.00104723,1.16358e-06,-1.13948e-08,0.276518,0.00104953,1.1294e-06,2.45839e-08,0.277568,0.00105186,1.20315e-06,-2.73361e-08,0.278621,0.00105418,1.12114e-06,2.51559e-08,0.279677,0.0010565,1.19661e-06,-1.36832e-08,0.280734,0.00105885,1.15556e-06,-2.25706e-10,0.281794,0.00106116,1.15488e-06,1.45862e-08,0.282857,0.00106352,1.19864e-06,-2.83167e-08,0.283921,0.00106583,1.11369e-06,3.90759e-08,0.284988,0.00106817,1.23092e-06,-3.85801e-08,0.286058,0.00107052,1.11518e-06,2.58375e-08,0.287129,0.00107283,1.19269e-06,-5.16498e-09,0.288203,0.0010752,1.1772e-06,-5.17768e-09,0.28928,0.00107754,1.16167e-06,-3.92671e-09,0.290358,0.00107985,1.14988e-06,2.08846e-08,0.29144,0.00108221,1.21254e-06,-2.00072e-08,0.292523,0.00108458,1.15252e-06,-4.60659e-10,0.293609,0.00108688,1.15114e-06,2.18499e-08,0.294697,0.0
0108925,1.21669e-06,-2.73343e-08,0.295787,0.0010916,1.13468e-06,2.78826e-08,0.29688,0.00109395,1.21833e-06,-2.45915e-08,0.297975,0.00109632,1.14456e-06,1.08787e-08,0.299073,0.00109864,1.17719e-06,1.08788e-08,0.300172,0.00110102,1.20983e-06,-2.45915e-08,0.301275,0.00110337,1.13605e-06,2.78828e-08,0.302379,0.00110573,1.2197e-06,-2.73348e-08,0.303486,0.00110808,1.1377e-06,2.18518e-08,0.304595,0.00111042,1.20325e-06,-4.67556e-10,0.305707,0.00111283,1.20185e-06,-1.99816e-08,0.306821,0.00111517,1.14191e-06,2.07891e-08,0.307937,0.00111752,1.20427e-06,-3.57026e-09,0.309056,0.00111992,1.19356e-06,-6.50797e-09,0.310177,0.00112228,1.17404e-06,-2.00165e-10,0.3113,0.00112463,1.17344e-06,7.30874e-09,0.312426,0.001127,1.19536e-06,7.67424e-10,0.313554,0.00112939,1.19767e-06,-1.03784e-08,0.314685,0.00113176,1.16653e-06,1.09437e-08,0.315818,0.00113412,1.19936e-06,-3.59406e-09,0.316953,0.00113651,1.18858e-06,3.43251e-09,0.318091,0.0011389,1.19888e-06,-1.0136e-08,0.319231,0.00114127,1.16847e-06,7.30915e-09,0.320374,0.00114363,1.1904e-06,1.07018e-08,0.321518,0.00114604,1.2225e-06,-2.03137e-08,0.322666,0.00114842,1.16156e-06,1.09484e-08,0.323815,0.00115078,1.19441e-06,6.32224e-09,0.324967,0.00115319,1.21337e-06,-6.43509e-09,0.326122,0.00115559,1.19407e-06,-1.03842e-08,0.327278,0.00115795,1.16291e-06,1.81697e-08,0.328438,0.00116033,1.21742e-06,-2.6901e-09,0.329599,0.00116276,1.20935e-06,-7.40939e-09,0.330763,0.00116515,1.18713e-06,2.52533e-09,0.331929,0.00116754,1.1947e-06,-2.69191e-09,0.333098,0.00116992,1.18663e-06,8.24218e-09,0.334269,0.00117232,1.21135e-06,-4.74377e-10,0.335443,0.00117474,1.20993e-06,-6.34471e-09,0.336619,0.00117714,1.1909e-06,-3.94922e-09,0.337797,0.00117951,1.17905e-06,2.21417e-08,0.338978,0.00118193,1.24547e-06,-2.50128e-08,0.340161,0.00118435,1.17043e-06,1.8305e-08,0.341346,0.00118674,1.22535e-06,-1.84048e-08,0.342534,0.00118914,1.17013e-06,2.55121e-08,0.343725,0.00119156,1.24667e-06,-2.40389e-08,0.344917,0.00119398,1.17455e-06,1.10389e-08,0.346113,0.00119636,1.20767e-06,9.68574e-09,0.34731,0.0011988,1.23673e-06,-1.99797e-08,0.34851,0.00120122,1.17679e-06,1.06284e-08,0.349713,0.0012036,1.20867e-06,7.26868e-09,0.350917,0.00120604,1.23048e-06,-9.90072e-09,0.352125,0.00120847,1.20078e-06,2.53177e-09,0.353334,0.00121088,1.20837e-06,-2.26199e-10,0.354546,0.0012133,1.20769e-06,-1.62705e-09,0.355761,0.00121571,1.20281e-06,6.73435e-09,0.356978,0.00121813,1.22302e-06,4.49207e-09,0.358197,0.00122059,1.23649e-06,-2.47027e-08,0.359419,0.00122299,1.16238e-06,3.47142e-08,0.360643,0.00122542,1.26653e-06,-2.47472e-08,0.36187,0.00122788,1.19229e-06,4.66965e-09,0.363099,0.00123028,1.20629e-06,6.06872e-09,0.36433,0.00123271,1.2245e-06,8.57729e-10,0.365564,0.00123516,1.22707e-06,-9.49952e-09,0.366801,0.00123759,1.19858e-06,7.33792e-09,0.36804,0.00124001,1.22059e-06,9.95025e-09,0.369281,0.00124248,1.25044e-06,-1.73366e-08,0.370525,0.00124493,1.19843e-06,-2.08464e-10,0.371771,0.00124732,1.1978e-06,1.81704e-08,0.373019,0.00124977,1.25232e-06,-1.28683e-08,0.37427,0.00125224,1.21371e-06,3.50042e-09,0.375524,0.00125468,1.22421e-06,-1.1335e-09,0.37678,0.00125712,1.22081e-06,1.03345e-09,0.378038,0.00125957,1.22391e-06,-3.00023e-09,0.379299,0.00126201,1.21491e-06,1.09676e-08,0.380562,0.00126447,1.24781e-06,-1.10676e-08,0.381828,0.00126693,1.21461e-06,3.50042e-09,0.383096,0.00126937,1.22511e-06,-2.93403e-09,0.384366,0.00127181,1.21631e-06,8.23574e-09,0.385639,0.00127427,1.24102e-06,-2.06607e-10,0.386915,0.00127675,1.2404e-06,-7.40935e-09,0.388193,0.00127921,1.21817e-06,4.1761e-11,0.389473,0.00128165,1.21829e-06,7.2422
3e-09,0.390756,0.0012841,1.24002e-06,7.91564e-10,0.392042,0.00128659,1.2424e-06,-1.04086e-08,0.393329,0.00128904,1.21117e-06,1.10405e-08,0.39462,0.0012915,1.24429e-06,-3.951e-09,0.395912,0.00129397,1.23244e-06,4.7634e-09,0.397208,0.00129645,1.24673e-06,-1.51025e-08,0.398505,0.0012989,1.20142e-06,2.58443e-08,0.399805,0.00130138,1.27895e-06,-2.86702e-08,0.401108,0.00130385,1.19294e-06,2.92318e-08,0.402413,0.00130632,1.28064e-06,-2.86524e-08,0.403721,0.0013088,1.19468e-06,2.57731e-08,0.405031,0.00131127,1.272e-06,-1.48355e-08,0.406343,0.00131377,1.2275e-06,3.76652e-09,0.407658,0.00131623,1.23879e-06,-2.30784e-10,0.408976,0.00131871,1.2381e-06,-2.84331e-09,0.410296,0.00132118,1.22957e-06,1.16041e-08,0.411618,0.00132367,1.26438e-06,-1.37708e-08,0.412943,0.00132616,1.22307e-06,1.36768e-08,0.41427,0.00132865,1.2641e-06,-1.1134e-08,0.4156,0.00133114,1.2307e-06,1.05714e-09,0.416933,0.00133361,1.23387e-06,6.90538e-09,0.418267,0.00133609,1.25459e-06,1.12372e-09,0.419605,0.00133861,1.25796e-06,-1.14002e-08,0.420945,0.00134109,1.22376e-06,1.46747e-08,0.422287,0.00134358,1.26778e-06,-1.7496e-08,0.423632,0.00134606,1.21529e-06,2.5507e-08,0.424979,0.00134857,1.29182e-06,-2.49272e-08,0.426329,0.00135108,1.21703e-06,1.45972e-08,0.427681,0.00135356,1.26083e-06,-3.65935e-09,0.429036,0.00135607,1.24985e-06,4.00178e-11,0.430393,0.00135857,1.24997e-06,3.49917e-09,0.431753,0.00136108,1.26047e-06,-1.40366e-08,0.433116,0.00136356,1.21836e-06,2.28448e-08,0.43448,0.00136606,1.28689e-06,-1.77378e-08,0.435848,0.00136858,1.23368e-06,1.83043e-08,0.437218,0.0013711,1.28859e-06,-2.56769e-08,0.43859,0.0013736,1.21156e-06,2.47987e-08,0.439965,0.0013761,1.28595e-06,-1.39133e-08,0.441342,0.00137863,1.24421e-06,1.05202e-09,0.442722,0.00138112,1.24737e-06,9.70507e-09,0.444104,0.00138365,1.27649e-06,-1.00698e-08,0.445489,0.00138617,1.24628e-06,7.72123e-10,0.446877,0.00138867,1.24859e-06,6.98132e-09,0.448267,0.00139118,1.26954e-06,1.10477e-09,0.449659,0.00139373,1.27285e-06,-1.14003e-08,0.451054,0.00139624,1.23865e-06,1.4694e-08,0.452452,0.00139876,1.28273e-06,-1.75734e-08,0.453852,0.00140127,1.23001e-06,2.5797e-08,0.455254,0.00140381,1.3074e-06,-2.60097e-08,0.456659,0.00140635,1.22937e-06,1.86371e-08,0.458067,0.00140886,1.28529e-06,-1.8736e-08,0.459477,0.00141137,1.22908e-06,2.65048e-08,0.46089,0.00141391,1.30859e-06,-2.76784e-08,0.462305,0.00141645,1.22556e-06,2.46043e-08,0.463722,0.00141897,1.29937e-06,-1.11341e-08,0.465143,0.00142154,1.26597e-06,-9.87033e-09,0.466565,0.00142404,1.23636e-06,2.08131e-08,0.467991,0.00142657,1.2988e-06,-1.37773e-08,0.469419,0.00142913,1.25746e-06,4.49378e-09,0.470849,0.00143166,1.27094e-06,-4.19781e-09,0.472282,0.00143419,1.25835e-06,1.22975e-08,0.473717,0.00143674,1.29524e-06,-1.51902e-08,0.475155,0.00143929,1.24967e-06,1.86608e-08,0.476596,0.00144184,1.30566e-06,-2.96506e-08,0.478039,0.00144436,1.2167e-06,4.03368e-08,0.479485,0.00144692,1.33771e-06,-4.22896e-08,0.480933,0.00144947,1.21085e-06,3.94148e-08,0.482384,0.00145201,1.32909e-06,-2.59626e-08,0.483837,0.00145459,1.2512e-06,4.83124e-09,0.485293,0.0014571,1.2657e-06,6.63757e-09,0.486751,0.00145966,1.28561e-06,-1.57911e-09,0.488212,0.00146222,1.28087e-06,-3.21468e-10,0.489676,0.00146478,1.27991e-06,2.86517e-09,0.491142,0.00146735,1.2885e-06,-1.11392e-08,0.49261,0.00146989,1.25508e-06,1.18893e-08,0.494081,0.00147244,1.29075e-06,-6.61574e-09,0.495555,0.001475,1.27091e-06,1.45736e-08,0.497031,0.00147759,1.31463e-06,-2.18759e-08,0.49851,0.00148015,1.249e-06,1.33252e-08,0.499992,0.00148269,1.28897e-06,-1.62277e-09,0.501476,0.00148526,1.28411e-06,-6
.83421e-09,0.502962,0.00148781,1.2636e-06,2.89596e-08,0.504451,0.00149042,1.35048e-06,-4.93997e-08,0.505943,0.00149298,1.20228e-06,4.94299e-08,0.507437,0.00149553,1.35057e-06,-2.91107e-08,0.508934,0.00149814,1.26324e-06,7.40848e-09,0.510434,0.00150069,1.28547e-06,-5.23187e-10,0.511936,0.00150326,1.2839e-06,-5.31585e-09,0.51344,0.00150581,1.26795e-06,2.17866e-08,0.514947,0.00150841,1.33331e-06,-2.22257e-08,0.516457,0.00151101,1.26663e-06,7.51178e-09,0.517969,0.00151357,1.28917e-06,-7.82128e-09,0.519484,0.00151613,1.2657e-06,2.37733e-08,0.521002,0.00151873,1.33702e-06,-2.76674e-08,0.522522,0.00152132,1.25402e-06,2.72917e-08,0.524044,0.00152391,1.3359e-06,-2.18949e-08,0.525569,0.00152652,1.27021e-06,6.83372e-10,0.527097,0.00152906,1.27226e-06,1.91613e-08,0.528628,0.00153166,1.32974e-06,-1.77241e-08,0.53016,0.00153427,1.27657e-06,-7.86963e-09,0.531696,0.0015368,1.25296e-06,4.92027e-08,0.533234,0.00153945,1.40057e-06,-6.9732e-08,0.534775,0.00154204,1.19138e-06,5.09114e-08,0.536318,0.00154458,1.34411e-06,-1.4704e-08,0.537864,0.00154722,1.3e-06,7.9048e-09,0.539413,0.00154984,1.32371e-06,-1.69152e-08,0.540964,0.00155244,1.27297e-06,1.51355e-10,0.542517,0.00155499,1.27342e-06,1.63099e-08,0.544074,0.00155758,1.32235e-06,-5.78647e-09,0.545633,0.00156021,1.30499e-06,6.83599e-09,0.547194,0.00156284,1.3255e-06,-2.15575e-08,0.548758,0.00156543,1.26083e-06,1.97892e-08,0.550325,0.00156801,1.32019e-06,2.00525e-09,0.551894,0.00157065,1.32621e-06,-2.78103e-08,0.553466,0.00157322,1.24278e-06,4.96314e-08,0.555041,0.00157586,1.39167e-06,-5.1506e-08,0.556618,0.00157849,1.23716e-06,3.71835e-08,0.558198,0.00158107,1.34871e-06,-3.76233e-08,0.55978,0.00158366,1.23584e-06,5.37052e-08,0.561365,0.00158629,1.39695e-06,-5.79884e-08,0.562953,0.00158891,1.22299e-06,5.90392e-08,0.564543,0.00159153,1.4001e-06,-5.89592e-08,0.566136,0.00159416,1.22323e-06,5.7588e-08,0.567731,0.00159678,1.39599e-06,-5.21835e-08,0.569329,0.00159941,1.23944e-06,3.19369e-08,0.57093,0.00160199,1.33525e-06,-1.59594e-08,0.572533,0.00160461,1.28737e-06,3.19006e-08,0.574139,0.00160728,1.38307e-06,-5.20383e-08,0.575748,0.00160989,1.22696e-06,5.70431e-08,0.577359,0.00161251,1.39809e-06,-5.69247e-08,0.578973,0.00161514,1.22731e-06,5.14463e-08,0.580589,0.00161775,1.38165e-06,-2.9651e-08,0.582208,0.00162042,1.2927e-06,7.55339e-09,0.58383,0.00162303,1.31536e-06,-5.62636e-10,0.585455,0.00162566,1.31367e-06,-5.30281e-09,0.587081,0.00162827,1.29776e-06,2.17738e-08,0.588711,0.00163093,1.36309e-06,-2.21875e-08,0.590343,0.00163359,1.29652e-06,7.37164e-09,0.591978,0.00163621,1.31864e-06,-7.29907e-09,0.593616,0.00163882,1.29674e-06,2.18247e-08,0.595256,0.00164148,1.36221e-06,-2.03952e-08,0.596899,0.00164414,1.30103e-06,1.51241e-10,0.598544,0.00164675,1.30148e-06,1.97902e-08,0.600192,0.00164941,1.36085e-06,-1.97074e-08,0.601843,0.00165207,1.30173e-06,-5.65175e-10,0.603496,0.00165467,1.30004e-06,2.1968e-08,0.605152,0.00165734,1.36594e-06,-2.77024e-08,0.606811,0.00165999,1.28283e-06,2.92369e-08,0.608472,0.00166264,1.37054e-06,-2.96407e-08,0.610136,0.00166529,1.28162e-06,2.97215e-08,0.611803,0.00166795,1.37079e-06,-2.96408e-08,0.613472,0.0016706,1.28186e-06,2.92371e-08,0.615144,0.00167325,1.36957e-06,-2.77031e-08,0.616819,0.00167591,1.28647e-06,2.19708e-08,0.618496,0.00167855,1.35238e-06,-5.75407e-10,0.620176,0.00168125,1.35065e-06,-1.9669e-08,0.621858,0.00168389,1.29164e-06,1.96468e-08,0.623544,0.00168653,1.35058e-06,6.86403e-10,0.625232,0.00168924,1.35264e-06,-2.23924e-08,0.626922,0.00169187,1.28547e-06,2.92788e-08,0.628615,0.00169453,1.3733e-06,-3.51181e-08,0.630311,0.
00169717,1.26795e-06,5.15889e-08,0.63201,0.00169987,1.42272e-06,-5.2028e-08,0.633711,0.00170255,1.26663e-06,3.73139e-08,0.635415,0.0017052,1.37857e-06,-3.76227e-08,0.637121,0.00170784,1.2657e-06,5.35722e-08,0.63883,0.00171054,1.42642e-06,-5.74567e-08,0.640542,0.00171322,1.25405e-06,5.70456e-08,0.642257,0.0017159,1.42519e-06,-5.15163e-08,0.643974,0.00171859,1.27064e-06,2.98103e-08,0.645694,0.00172122,1.36007e-06,-8.12016e-09,0.647417,0.00172392,1.33571e-06,2.67039e-09,0.649142,0.0017266,1.34372e-06,-2.56152e-09,0.65087,0.00172928,1.33604e-06,7.57571e-09,0.6526,0.00173197,1.35876e-06,-2.77413e-08,0.654334,0.00173461,1.27554e-06,4.3785e-08,0.65607,0.00173729,1.40689e-06,-2.81896e-08,0.657808,0.00174002,1.32233e-06,9.36893e-09,0.65955,0.00174269,1.35043e-06,-9.28617e-09,0.661294,0.00174536,1.32257e-06,2.77757e-08,0.66304,0.00174809,1.4059e-06,-4.2212e-08,0.66479,0.00175078,1.27926e-06,2.1863e-08,0.666542,0.0017534,1.34485e-06,1.43648e-08,0.668297,0.00175613,1.38795e-06,-1.97177e-08,0.670054,0.00175885,1.3288e-06,4.90115e-09,0.671814,0.00176152,1.3435e-06,1.13232e-10,0.673577,0.00176421,1.34384e-06,-5.3542e-09,0.675343,0.00176688,1.32778e-06,2.13035e-08,0.677111,0.0017696,1.39169e-06,-2.02553e-08,0.678882,0.00177232,1.33092e-06,1.13005e-10,0.680656,0.00177499,1.33126e-06,1.98031e-08,0.682432,0.00177771,1.39067e-06,-1.97211e-08,0.684211,0.00178043,1.33151e-06,-5.2349e-10,0.685993,0.00178309,1.32994e-06,2.18151e-08,0.687777,0.00178582,1.39538e-06,-2.71325e-08,0.689564,0.00178853,1.31398e-06,2.71101e-08,0.691354,0.00179124,1.39531e-06,-2.17035e-08,0.693147,0.00179396,1.3302e-06,9.92865e-11,0.694942,0.00179662,1.3305e-06,2.13063e-08,0.69674,0.00179935,1.39442e-06,-2.57198e-08,0.698541,0.00180206,1.31726e-06,2.19682e-08,0.700344,0.00180476,1.38317e-06,-2.54852e-09,0.70215,0.00180752,1.37552e-06,-1.17741e-08,0.703959,0.00181023,1.3402e-06,-9.95999e-09,0.705771,0.00181288,1.31032e-06,5.16141e-08,0.707585,0.00181566,1.46516e-06,-7.72869e-08,0.709402,0.00181836,1.2333e-06,7.87197e-08,0.711222,0.00182106,1.46946e-06,-5.87781e-08,0.713044,0.00182382,1.29312e-06,3.71834e-08,0.714869,0.00182652,1.40467e-06,-3.03511e-08,0.716697,0.00182924,1.31362e-06,2.46161e-08,0.718528,0.00183194,1.38747e-06,-8.5087e-09,0.720361,0.00183469,1.36194e-06,9.41892e-09,0.722197,0.00183744,1.3902e-06,-2.91671e-08,0.724036,0.00184014,1.3027e-06,4.76448e-08,0.725878,0.00184288,1.44563e-06,-4.22028e-08,0.727722,0.00184565,1.31902e-06,1.95682e-09,0.729569,0.00184829,1.3249e-06,3.43754e-08,0.731419,0.00185104,1.42802e-06,-2.0249e-08,0.733271,0.00185384,1.36727e-06,-1.29838e-08,0.735126,0.00185654,1.32832e-06,1.25794e-08,0.736984,0.00185923,1.36606e-06,2.22711e-08,0.738845,0.00186203,1.43287e-06,-4.20594e-08,0.740708,0.00186477,1.3067e-06,2.67571e-08,0.742574,0.00186746,1.38697e-06,-5.36424e-09,0.744443,0.00187022,1.37087e-06,-5.30023e-09,0.746315,0.00187295,1.35497e-06,2.65653e-08,0.748189,0.00187574,1.43467e-06,-4.13564e-08,0.750066,0.00187848,1.3106e-06,1.9651e-08,0.751946,0.00188116,1.36955e-06,2.23572e-08,0.753828,0.00188397,1.43663e-06,-4.9475e-08,0.755714,0.00188669,1.2882e-06,5.63335e-08,0.757602,0.00188944,1.4572e-06,-5.66499e-08,0.759493,0.00189218,1.28725e-06,5.10567e-08,0.761386,0.00189491,1.44042e-06,-2.83677e-08,0.763283,0.00189771,1.35532e-06,2.80962e-09,0.765182,0.00190042,1.36375e-06,1.71293e-08,0.767083,0.0019032,1.41513e-06,-1.17221e-08,0.768988,0.001906,1.37997e-06,-2.98453e-08,0.770895,0.00190867,1.29043e-06,7.14987e-08,0.772805,0.00191146,1.50493e-06,-7.73354e-08,0.774718,0.00191424,1.27292e-06,5.90292e-08,0.77663
4,0.00191697,1.45001e-06,-3.9572e-08,0.778552,0.00191975,1.33129e-06,3.9654e-08,0.780473,0.00192253,1.45026e-06,-5.94395e-08,0.782397,0.00192525,1.27194e-06,7.88945e-08,0.784324,0.00192803,1.50862e-06,-7.73249e-08,0.786253,0.00193082,1.27665e-06,5.15913e-08,0.788185,0.00193352,1.43142e-06,-9.83099e-09,0.79012,0.00193636,1.40193e-06,-1.22672e-08,0.792058,0.00193912,1.36513e-06,-7.05275e-10,0.793999,0.00194185,1.36301e-06,1.50883e-08,0.795942,0.00194462,1.40828e-06,-4.33147e-11,0.797888,0.00194744,1.40815e-06,-1.49151e-08,0.799837,0.00195021,1.3634e-06,9.93244e-11,0.801788,0.00195294,1.3637e-06,1.45179e-08,0.803743,0.00195571,1.40725e-06,1.43363e-09,0.8057,0.00195853,1.41155e-06,-2.02525e-08,0.80766,0.00196129,1.35079e-06,1.99718e-08,0.809622,0.00196405,1.41071e-06,-3.01649e-11,0.811588,0.00196687,1.41062e-06,-1.9851e-08,0.813556,0.00196964,1.35107e-06,1.98296e-08,0.815527,0.0019724,1.41056e-06,1.37485e-10,0.817501,0.00197522,1.41097e-06,-2.03796e-08,0.819477,0.00197798,1.34983e-06,2.17763e-08,0.821457,0.00198074,1.41516e-06,-7.12085e-09,0.823439,0.00198355,1.3938e-06,6.70707e-09,0.825424,0.00198636,1.41392e-06,-1.97074e-08,0.827412,0.00198913,1.35479e-06,1.25179e-08,0.829402,0.00199188,1.39235e-06,2.92405e-08,0.831396,0.00199475,1.48007e-06,-6.98755e-08,0.833392,0.0019975,1.27044e-06,7.14477e-08,0.835391,0.00200026,1.48479e-06,-3.71014e-08,0.837392,0.00200311,1.37348e-06,1.73533e-08,0.839397,0.00200591,1.42554e-06,-3.23118e-08,0.841404,0.00200867,1.32861e-06,5.2289e-08,0.843414,0.00201148,1.48547e-06,-5.76348e-08,0.845427,0.00201428,1.31257e-06,5.9041e-08,0.847443,0.00201708,1.48969e-06,-5.93197e-08,0.849461,0.00201988,1.31173e-06,5.90289e-08,0.851482,0.00202268,1.48882e-06,-5.75864e-08,0.853507,0.00202549,1.31606e-06,5.21075e-08,0.855533,0.00202828,1.47238e-06,-3.16344e-08,0.857563,0.00203113,1.37748e-06,1.48257e-08,0.859596,0.00203393,1.42196e-06,-2.76684e-08,0.861631,0.00203669,1.33895e-06,3.62433e-08,0.863669,0.00203947,1.44768e-06,1.90463e-09,0.86571,0.00204237,1.45339e-06,-4.38617e-08,0.867754,0.00204515,1.32181e-06,5.43328e-08,0.8698,0.00204796,1.48481e-06,-5.42603e-08,0.87185,0.00205076,1.32203e-06,4.34989e-08,0.873902,0.00205354,1.45252e-06,-5.26029e-10,0.875957,0.00205644,1.45095e-06,-4.13949e-08,0.878015,0.00205922,1.32676e-06,4.68962e-08,0.880075,0.00206201,1.46745e-06,-2.69807e-08,0.882139,0.00206487,1.38651e-06,1.42181e-09,0.884205,0.00206764,1.39077e-06,2.12935e-08,0.886274,0.00207049,1.45465e-06,-2.69912e-08,0.888346,0.00207332,1.37368e-06,2.70664e-08,0.890421,0.00207615,1.45488e-06,-2.16698e-08,0.892498,0.00207899,1.38987e-06,8.14756e-12,0.894579,0.00208177,1.38989e-06,2.16371e-08,0.896662,0.00208462,1.45481e-06,-2.6952e-08,0.898748,0.00208744,1.37395e-06,2.65663e-08,0.900837,0.00209027,1.45365e-06,-1.97084e-08,0.902928,0.00209312,1.39452e-06,-7.33731e-09,0.905023,0.00209589,1.37251e-06,4.90578e-08,0.90712,0.00209878,1.51968e-06,-6.96845e-08,0.90922,0.00210161,1.31063e-06,5.08664e-08,0.911323,0.00210438,1.46323e-06,-1.45717e-08,0.913429,0.00210727,1.41952e-06,7.42038e-09,0.915538,0.00211013,1.44178e-06,-1.51097e-08,0.917649,0.00211297,1.39645e-06,-6.58618e-09,0.919764,0.00211574,1.37669e-06,4.14545e-08,0.921881,0.00211862,1.50105e-06,-4.00222e-08,0.924001,0.0021215,1.38099e-06,-5.7518e-10,0.926124,0.00212426,1.37926e-06,4.23229e-08,0.92825,0.00212714,1.50623e-06,-4.9507e-08,0.930378,0.00213001,1.35771e-06,3.64958e-08,0.93251,0.00213283,1.4672e-06,-3.68713e-08,0.934644,0.00213566,1.35658e-06,5.13848e-08,0.936781,0.00213852,1.51074e-06,-4.94585e-08,0.938921,0.0021414,1.36236e-
06,2.72399e-08,0.941064,0.0021442,1.44408e-06,1.0372e-10,0.943209,0.00214709,1.44439e-06,-2.76547e-08,0.945358,0.0021499,1.36143e-06,5.09106e-08,0.947509,0.00215277,1.51416e-06,-5.67784e-08,0.949663,0.00215563,1.34382e-06,5.69935e-08,0.95182,0.00215849,1.5148e-06,-5.19861e-08,0.95398,0.00216136,1.35885e-06,3.17417e-08,0.956143,0.00216418,1.45407e-06,-1.53758e-08,0.958309,0.00216704,1.40794e-06,2.97615e-08,0.960477,0.00216994,1.49723e-06,-4.40657e-08,0.962649,0.00217281,1.36503e-06,2.72919e-08,0.964823,0.00217562,1.44691e-06,-5.49729e-09,0.967,0.0021785,1.43041e-06,-5.30273e-09,0.96918,0.00218134,1.41451e-06,2.67084e-08,0.971363,0.00218425,1.49463e-06,-4.19265e-08,0.973548,0.00218711,1.36885e-06,2.17881e-08,0.975737,0.00218992,1.43422e-06,1.43789e-08,0.977928,0.00219283,1.47735e-06,-1.96989e-08,0.980122,0.00219572,1.41826e-06,4.81221e-09,0.98232,0.00219857,1.43269e-06,4.50048e-10,0.98452,0.00220144,1.43404e-06,-6.61237e-09,0.986722,0.00220429,1.41421e-06,2.59993e-08,0.988928,0.0022072,1.4922e-06,-3.77803e-08,0.991137,0.00221007,1.37886e-06,5.9127e-09,0.993348,0.00221284,1.3966e-06,1.33339e-07,0.995563,0.00221604,1.79662e-06,-5.98872e-07,0.99778,0.00222015,0.,0.};\n\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void RGB2LabConvert_f(const T& src, D& dst)\n        {\n            const float _1_3 = 1.0f / 3.0f;\n            const float _a = 16.0f / 116.0f;\n\n            float B = blueIdx == 0 ? src.x : src.z;\n            float G = src.y;\n            float R = blueIdx == 0 ? src.z : src.x;\n\n            if (srgb)\n            {\n                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);\n                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);\n                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);\n            }\n\n            float X = B * 0.189828f + G * 0.376219f + R * 0.433953f;\n            float Y = B * 0.072169f + G * 0.715160f + R * 0.212671f;\n            float Z = B * 0.872766f + G * 0.109477f + R * 0.017758f;\n\n            float FX = X > 0.008856f ? ::powf(X, _1_3) : (7.787f * X + _a);\n            float FY = Y > 0.008856f ? ::powf(Y, _1_3) : (7.787f * Y + _a);\n            float FZ = Z > 0.008856f ? ::powf(Z, _1_3) : (7.787f * Z + _a);\n\n            float L = Y > 0.008856f ? 
(116.f * FY - 16.f) : (903.3f * Y); // CIE L*: cube-root branch above the 0.008856 (= (6/29)^3) threshold, linear 903.3*Y (903.3 = (29/3)^3) below\n            float a = 500.f * (FX - FY);\n            float b = 200.f * (FY - FZ);\n\n            dst.x = L;\n            dst.y = a;\n            dst.z = b;\n        }\n\n        // RGB2Lab functor: specialized for uchar (fixed-point RGB2LabConvert_b) and float (RGB2LabConvert_f);\n        // scn/dcn are the source/destination channel counts used to pick the vector types.\n        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct RGB2Lab;\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct RGB2Lab<uchar, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const\n            {\n                typename TypeVec<uchar, dcn>::vec_type dst;\n\n                RGB2LabConvert_b<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2Lab() {}\n            __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}\n        };\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct RGB2Lab<float, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const\n            {\n                typename TypeVec<float, dcn>::vec_type dst;\n\n                RGB2LabConvert_f<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2Lab() {}\n            __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}\n        };\n    }\n\n// Generates a name##_traits struct whose create_functor() returns the RGB2Lab functor with the given template arguments.\n#define OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(name, scn, dcn, srgb, blueIdx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2Lab<T, scn, dcn, srgb, blueIdx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        // Cubic-spline coefficients (4 per segment) for the inverse sRGB gamma curve, evaluated with splineInterpolate().\n        __constant__ float c_sRGBInvGammaTab[] = 
{0,0.0126255,0.,-8.33961e-06,0.0126172,0.0126005,-2.50188e-05,4.1698e-05,0.0252344,0.0126756,0.000100075,-0.000158451,0.0378516,0.0124004,-0.000375277,-0.000207393,0.0496693,0.0110276,-0.000997456,0.00016837,0.0598678,0.00953783,-0.000492346,2.07235e-05,0.068934,0.00861531,-0.000430176,3.62876e-05,0.0771554,0.00786382,-0.000321313,1.87625e-05,0.0847167,0.00727748,-0.000265025,1.53594e-05,0.0917445,0.00679351,-0.000218947,1.10545e-05,0.0983301,0.00638877,-0.000185784,8.66984e-06,0.104542,0.00604322,-0.000159774,6.82996e-06,0.110432,0.00574416,-0.000139284,5.51008e-06,0.116042,0.00548212,-0.000122754,4.52322e-06,0.121406,0.00525018,-0.000109184,3.75557e-06,0.126551,0.00504308,-9.79177e-05,3.17134e-06,0.131499,0.00485676,-8.84037e-05,2.68469e-06,0.13627,0.004688,-8.03496e-05,2.31725e-06,0.14088,0.00453426,-7.33978e-05,2.00868e-06,0.145343,0.00439349,-6.73718e-05,1.74775e-06,0.149671,0.00426399,-6.21286e-05,1.53547e-06,0.153875,0.00414434,-5.75222e-05,1.364e-06,0.157963,0.00403338,-5.34301e-05,1.20416e-06,0.161944,0.00393014,-4.98177e-05,1.09114e-06,0.165825,0.00383377,-4.65443e-05,9.57987e-07,0.169613,0.00374356,-4.36703e-05,8.88359e-07,0.173314,0.00365888,-4.10052e-05,7.7849e-07,0.176933,0.00357921,-3.86697e-05,7.36254e-07,0.180474,0.00350408,-3.6461e-05,6.42534e-07,0.183942,0.00343308,-3.45334e-05,6.12614e-07,0.187342,0.00336586,-3.26955e-05,5.42894e-07,0.190675,0.00330209,-3.10669e-05,5.08967e-07,0.193947,0.00324149,-2.954e-05,4.75977e-07,0.197159,0.00318383,-2.8112e-05,4.18343e-07,0.200315,0.00312887,-2.6857e-05,4.13651e-07,0.203418,0.00307639,-2.5616e-05,3.70847e-07,0.206469,0.00302627,-2.45035e-05,3.3813e-07,0.209471,0.00297828,-2.34891e-05,3.32999e-07,0.212426,0.0029323,-2.24901e-05,2.96826e-07,0.215336,0.00288821,-2.15996e-05,2.82736e-07,0.218203,0.00284586,-2.07514e-05,2.70961e-07,0.221029,0.00280517,-1.99385e-05,2.42744e-07,0.223814,0.00276602,-1.92103e-05,2.33277e-07,0.226561,0.0027283,-1.85105e-05,2.2486e-07,0.229271,0.00269195,-1.78359e-05,2.08383e-07,0.231945,0.00265691,-1.72108e-05,1.93305e-07,0.234585,0.00262307,-1.66308e-05,1.80687e-07,0.237192,0.00259035,-1.60888e-05,1.86632e-07,0.239766,0.00255873,-1.55289e-05,1.60569e-07,0.24231,0.00252815,-1.50472e-05,1.54566e-07,0.244823,0.00249852,-1.45835e-05,1.59939e-07,0.247307,0.00246983,-1.41037e-05,1.29549e-07,0.249763,0.00244202,-1.3715e-05,1.41429e-07,0.252191,0.00241501,-1.32907e-05,1.39198e-07,0.254593,0.00238885,-1.28731e-05,1.06444e-07,0.256969,0.00236342,-1.25538e-05,1.2048e-07,0.25932,0.00233867,-1.21924e-05,1.26892e-07,0.261647,0.00231467,-1.18117e-05,8.72084e-08,0.26395,0.00229131,-1.15501e-05,1.20323e-07,0.26623,0.00226857,-1.11891e-05,8.71514e-08,0.268487,0.00224645,-1.09276e-05,9.73165e-08,0.270723,0.00222489,-1.06357e-05,8.98259e-08,0.272937,0.00220389,-1.03662e-05,7.98218e-08,0.275131,0.00218339,-1.01267e-05,9.75254e-08,0.277304,0.00216343,-9.83416e-06,6.65195e-08,0.279458,0.00214396,-9.63461e-06,8.34313e-08,0.281592,0.00212494,-9.38431e-06,7.65919e-08,0.283708,0.00210641,-9.15454e-06,5.7236e-08,0.285805,0.00208827,-8.98283e-06,8.18939e-08,0.287885,0.00207055,-8.73715e-06,6.2224e-08,0.289946,0.00205326,-8.55047e-06,5.66388e-08,0.291991,0.00203633,-8.38056e-06,6.88491e-08,0.294019,0.00201978,-8.17401e-06,5.53955e-08,0.296031,0.00200359,-8.00782e-06,6.71971e-08,0.298027,0.00198778,-7.80623e-06,3.34439e-08,0.300007,0.00197227,-7.7059e-06,6.7248e-08,0.301971,0.00195706,-7.50416e-06,5.51915e-08,0.303921,0.00194221,-7.33858e-06,3.98124e-08,0.305856,0.00192766,-7.21915e-06,5.37795e-08,0.307776,0.00191338,-7.05781e-06,4.3091
9e-08,0.309683,0.00189939,-6.92853e-06,4.20744e-08,0.311575,0.00188566,-6.80231e-06,5.68321e-08,0.313454,0.00187223,-6.63181e-06,2.86195e-08,0.31532,0.00185905,-6.54595e-06,3.73075e-08,0.317172,0.00184607,-6.43403e-06,6.05684e-08,0.319012,0.00183338,-6.25233e-06,1.84426e-08,0.320839,0.00182094,-6.197e-06,4.44757e-08,0.322654,0.00180867,-6.06357e-06,4.20729e-08,0.324456,0.00179667,-5.93735e-06,2.56511e-08,0.326247,0.00178488,-5.8604e-06,3.41368e-08,0.328026,0.00177326,-5.75799e-06,4.64177e-08,0.329794,0.00176188,-5.61874e-06,1.86107e-08,0.33155,0.0017507,-5.5629e-06,2.81511e-08,0.333295,0.00173966,-5.47845e-06,4.75987e-08,0.335029,0.00172884,-5.33565e-06,1.98726e-08,0.336753,0.00171823,-5.27604e-06,2.19226e-08,0.338466,0.00170775,-5.21027e-06,4.14483e-08,0.340169,0.00169745,-5.08592e-06,2.09017e-08,0.341861,0.00168734,-5.02322e-06,2.39561e-08,0.343543,0.00167737,-4.95135e-06,3.22852e-08,0.345216,0.00166756,-4.85449e-06,2.57173e-08,0.346878,0.00165793,-4.77734e-06,1.38569e-08,0.348532,0.00164841,-4.73577e-06,3.80634e-08,0.350175,0.00163906,-4.62158e-06,1.27043e-08,0.35181,0.00162985,-4.58347e-06,3.03279e-08,0.353435,0.00162078,-4.49249e-06,1.49961e-08,0.355051,0.00161184,-4.4475e-06,2.88977e-08,0.356659,0.00160303,-4.3608e-06,1.84241e-08,0.358257,0.00159436,-4.30553e-06,1.6616e-08,0.359848,0.0015858,-4.25568e-06,3.43218e-08,0.361429,0.00157739,-4.15272e-06,-4.89172e-09,0.363002,0.00156907,-4.16739e-06,4.48498e-08,0.364567,0.00156087,-4.03284e-06,4.30676e-09,0.366124,0.00155282,-4.01992e-06,2.73303e-08,0.367673,0.00154486,-3.93793e-06,5.58036e-09,0.369214,0.001537,-3.92119e-06,3.97554e-08,0.370747,0.00152928,-3.80193e-06,-1.55904e-08,0.372272,0.00152163,-3.8487e-06,5.24081e-08,0.37379,0.00151409,-3.69147e-06,-1.52272e-08,0.375301,0.00150666,-3.73715e-06,3.83028e-08,0.376804,0.0014993,-3.62225e-06,1.10278e-08,0.378299,0.00149209,-3.58916e-06,6.99326e-09,0.379788,0.00148493,-3.56818e-06,2.06038e-08,0.381269,0.00147786,-3.50637e-06,2.98009e-08,0.382744,0.00147093,-3.41697e-06,-2.05978e-08,0.384211,0.00146404,-3.47876e-06,5.25899e-08,0.385672,0.00145724,-3.32099e-06,-1.09471e-08,0.387126,0.00145056,-3.35383e-06,2.10009e-08,0.388573,0.00144392,-3.29083e-06,1.63501e-08,0.390014,0.00143739,-3.24178e-06,3.00641e-09,0.391448,0.00143091,-3.23276e-06,3.12282e-08,0.392875,0.00142454,-3.13908e-06,-8.70932e-09,0.394297,0.00141824,-3.16521e-06,3.34114e-08,0.395712,0.00141201,-3.06497e-06,-5.72754e-09,0.397121,0.00140586,-3.08215e-06,1.9301e-08,0.398524,0.00139975,-3.02425e-06,1.7931e-08,0.39992,0.00139376,-2.97046e-06,-1.61822e-09,0.401311,0.00138781,-2.97531e-06,1.83442e-08,0.402696,0.00138192,-2.92028e-06,1.76485e-08,0.404075,0.00137613,-2.86733e-06,4.68617e-10,0.405448,0.00137039,-2.86593e-06,1.02794e-08,0.406816,0.00136469,-2.83509e-06,1.80179e-08,0.408178,0.00135908,-2.78104e-06,7.05594e-09,0.409534,0.00135354,-2.75987e-06,1.33633e-08,0.410885,0.00134806,-2.71978e-06,-9.04568e-10,0.41223,0.00134261,-2.72249e-06,2.0057e-08,0.41357,0.00133723,-2.66232e-06,1.00841e-08,0.414905,0.00133194,-2.63207e-06,-7.88835e-10,0.416234,0.00132667,-2.63444e-06,2.28734e-08,0.417558,0.00132147,-2.56582e-06,-1.29785e-09,0.418877,0.00131633,-2.56971e-06,1.21205e-08,0.420191,0.00131123,-2.53335e-06,1.24202e-08,0.421499,0.0013062,-2.49609e-06,-2.19681e-09,0.422803,0.0013012,-2.50268e-06,2.61696e-08,0.424102,0.00129628,-2.42417e-06,-1.30747e-08,0.425396,0.00129139,-2.46339e-06,2.6129e-08,0.426685,0.00128654,-2.38501e-06,-2.03454e-09,0.427969,0.00128176,-2.39111e-06,1.18115e-08,0.429248,0.00127702,-2.35567e-06,1.43932e-08,0.4305
23,0.00127235,-2.31249e-06,-9.77965e-09,0.431793,0.00126769,-2.34183e-06,2.47253e-08,0.433058,0.00126308,-2.26766e-06,2.85278e-10,0.434319,0.00125855,-2.2668e-06,3.93614e-09,0.435575,0.00125403,-2.25499e-06,1.37722e-08,0.436827,0.00124956,-2.21368e-06,5.79803e-10,0.438074,0.00124513,-2.21194e-06,1.37112e-08,0.439317,0.00124075,-2.1708e-06,4.17973e-09,0.440556,0.00123642,-2.15826e-06,-6.27703e-10,0.44179,0.0012321,-2.16015e-06,2.81332e-08,0.44302,0.00122787,-2.07575e-06,-2.24985e-08,0.444246,0.00122365,-2.14324e-06,3.20586e-08,0.445467,0.00121946,-2.04707e-06,-1.6329e-08,0.446685,0.00121532,-2.09605e-06,3.32573e-08,0.447898,0.00121122,-1.99628e-06,-2.72927e-08,0.449107,0.00120715,-2.07816e-06,4.6111e-08,0.450312,0.00120313,-1.93983e-06,-3.79416e-08,0.451514,0.00119914,-2.05365e-06,4.60507e-08,0.452711,0.00119517,-1.9155e-06,-2.7052e-08,0.453904,0.00119126,-1.99666e-06,3.23551e-08,0.455093,0.00118736,-1.89959e-06,-1.29613e-08,0.456279,0.00118352,-1.93848e-06,1.94905e-08,0.45746,0.0011797,-1.88e-06,-5.39588e-09,0.458638,0.00117593,-1.89619e-06,2.09282e-09,0.459812,0.00117214,-1.88991e-06,2.68267e-08,0.460982,0.00116844,-1.80943e-06,-1.99925e-08,0.462149,0.00116476,-1.86941e-06,2.3341e-08,0.463312,0.00116109,-1.79939e-06,-1.37674e-08,0.464471,0.00115745,-1.84069e-06,3.17287e-08,0.465627,0.00115387,-1.7455e-06,-2.37407e-08,0.466779,0.00115031,-1.81673e-06,3.34315e-08,0.467927,0.00114677,-1.71643e-06,-2.05786e-08,0.469073,0.00114328,-1.77817e-06,1.90802e-08,0.470214,0.00113978,-1.72093e-06,3.86247e-09,0.471352,0.00113635,-1.70934e-06,-4.72759e-09,0.472487,0.00113292,-1.72352e-06,1.50478e-08,0.473618,0.00112951,-1.67838e-06,4.14108e-09,0.474746,0.00112617,-1.66595e-06,-1.80986e-09,0.47587,0.00112283,-1.67138e-06,3.09816e-09,0.476991,0.0011195,-1.66209e-06,1.92198e-08,0.478109,0.00111623,-1.60443e-06,-2.03726e-08,0.479224,0.00111296,-1.66555e-06,3.2468e-08,0.480335,0.00110973,-1.56814e-06,-2.00922e-08,0.481443,0.00110653,-1.62842e-06,1.80983e-08,0.482548,0.00110333,-1.57413e-06,7.30362e-09,0.48365,0.0011002,-1.55221e-06,-1.75107e-08,0.484749,0.00109705,-1.60475e-06,3.29373e-08,0.485844,0.00109393,-1.50594e-06,-2.48315e-08,0.486937,0.00109085,-1.58043e-06,3.65865e-08,0.488026,0.0010878,-1.47067e-06,-3.21078e-08,0.489112,0.00108476,-1.56699e-06,3.22397e-08,0.490195,0.00108172,-1.47027e-06,-7.44391e-09,0.491276,0.00107876,-1.49261e-06,-2.46428e-09,0.492353,0.00107577,-1.5e-06,1.73011e-08,0.493427,0.00107282,-1.4481e-06,-7.13552e-09,0.494499,0.0010699,-1.4695e-06,1.1241e-08,0.495567,0.001067,-1.43578e-06,-8.02637e-09,0.496633,0.0010641,-1.45986e-06,2.08645e-08,0.497695,0.00106124,-1.39726e-06,-1.58271e-08,0.498755,0.0010584,-1.44475e-06,1.26415e-08,0.499812,0.00105555,-1.40682e-06,2.48655e-08,0.500866,0.00105281,-1.33222e-06,-5.24988e-08,0.501918,0.00104999,-1.48972e-06,6.59206e-08,0.502966,0.00104721,-1.29196e-06,-3.237e-08,0.504012,0.00104453,-1.38907e-06,3.95479e-09,0.505055,0.00104176,-1.3772e-06,1.65509e-08,0.506096,0.00103905,-1.32755e-06,-1.05539e-08,0.507133,0.00103637,-1.35921e-06,2.56648e-08,0.508168,0.00103373,-1.28222e-06,-3.25007e-08,0.509201,0.00103106,-1.37972e-06,4.47336e-08,0.51023,0.00102844,-1.24552e-06,-2.72245e-08,0.511258,0.00102587,-1.32719e-06,4.55952e-09,0.512282,0.00102323,-1.31352e-06,8.98645e-09,0.513304,0.00102063,-1.28656e-06,1.90992e-08,0.514323,0.00101811,-1.22926e-06,-2.57786e-08,0.51534,0.00101557,-1.30659e-06,2.44104e-08,0.516355,0.00101303,-1.23336e-06,-1.22581e-08,0.517366,0.00101053,-1.27014e-06,2.4622e-08,0.518376,0.00100806,-1.19627e-06,-2.66253e-08,0.519383,0.00
100559,-1.27615e-06,2.22744e-08,0.520387,0.00100311,-1.20932e-06,-2.8679e-09,0.521389,0.00100068,-1.21793e-06,-1.08029e-08,0.522388,0.000998211,-1.25034e-06,4.60795e-08,0.523385,0.000995849,-1.1121e-06,-5.4306e-08,0.52438,0.000993462,-1.27502e-06,5.19354e-08,0.525372,0.000991067,-1.11921e-06,-3.42262e-08,0.526362,0.000988726,-1.22189e-06,2.53646e-08,0.52735,0.000986359,-1.14579e-06,-7.62782e-09,0.528335,0.000984044,-1.16868e-06,5.14668e-09,0.529318,0.000981722,-1.15324e-06,-1.29589e-08,0.530298,0.000979377,-1.19211e-06,4.66888e-08,0.531276,0.000977133,-1.05205e-06,-5.45868e-08,0.532252,0.000974865,-1.21581e-06,5.24495e-08,0.533226,0.000972591,-1.05846e-06,-3.60019e-08,0.534198,0.000970366,-1.16647e-06,3.19537e-08,0.535167,0.000968129,-1.07061e-06,-3.2208e-08,0.536134,0.000965891,-1.16723e-06,3.72738e-08,0.537099,0.000963668,-1.05541e-06,2.32205e-09,0.538061,0.000961564,-1.04844e-06,-4.65618e-08,0.539022,0.000959328,-1.18813e-06,6.47159e-08,0.53998,0.000957146,-9.93979e-07,-3.3488e-08,0.540936,0.000955057,-1.09444e-06,9.63166e-09,0.54189,0.000952897,-1.06555e-06,-5.03871e-09,0.542842,0.000950751,-1.08066e-06,1.05232e-08,0.543792,0.000948621,-1.04909e-06,2.25503e-08,0.544739,0.000946591,-9.81444e-07,-4.11195e-08,0.545685,0.000944504,-1.1048e-06,2.27182e-08,0.546628,0.000942363,-1.03665e-06,9.85146e-09,0.54757,0.000940319,-1.00709e-06,-2.51938e-09,0.548509,0.000938297,-1.01465e-06,2.25858e-10,0.549446,0.000936269,-1.01397e-06,1.61598e-09,0.550381,0.000934246,-1.00913e-06,-6.68983e-09,0.551315,0.000932207,-1.0292e-06,2.51434e-08,0.552246,0.000930224,-9.53765e-07,-3.42793e-08,0.553175,0.000928214,-1.0566e-06,5.23688e-08,0.554102,0.000926258,-8.99497e-07,-5.59865e-08,0.555028,0.000924291,-1.06746e-06,5.23679e-08,0.555951,0.000922313,-9.10352e-07,-3.42763e-08,0.556872,0.00092039,-1.01318e-06,2.51326e-08,0.557792,0.000918439,-9.37783e-07,-6.64954e-09,0.558709,0.000916543,-9.57732e-07,1.46554e-09,0.559625,0.000914632,-9.53335e-07,7.87281e-10,0.560538,0.000912728,-9.50973e-07,-4.61466e-09,0.56145,0.000910812,-9.64817e-07,1.76713e-08,0.56236,0.000908935,-9.11804e-07,-6.46564e-09,0.563268,0.000907092,-9.312e-07,8.19121e-09,0.564174,0.000905255,-9.06627e-07,-2.62992e-08,0.565078,0.000903362,-9.85524e-07,3.74007e-08,0.565981,0.000901504,-8.73322e-07,-4.0942e-09,0.566882,0.000899745,-8.85605e-07,-2.1024e-08,0.56778,0.00089791,-9.48677e-07,2.85854e-08,0.568677,0.000896099,-8.62921e-07,-3.3713e-08,0.569573,0.000894272,-9.64059e-07,4.6662e-08,0.570466,0.000892484,-8.24073e-07,-3.37258e-08,0.571358,0.000890734,-9.25251e-07,2.86365e-08,0.572247,0.00088897,-8.39341e-07,-2.12155e-08,0.573135,0.000887227,-9.02988e-07,-3.37913e-09,0.574022,0.000885411,-9.13125e-07,3.47319e-08,0.574906,0.000883689,-8.08929e-07,-1.63394e-08,0.575789,0.000882022,-8.57947e-07,-2.8979e-08,0.57667,0.00088022,-9.44885e-07,7.26509e-08,0.57755,0.000878548,-7.26932e-07,-8.28106e-08,0.578427,0.000876845,-9.75364e-07,7.97774e-08,0.579303,0.000875134,-7.36032e-07,-5.74849e-08,0.580178,0.00087349,-9.08486e-07,3.09529e-08,0.58105,0.000871765,-8.15628e-07,-6.72206e-09,0.581921,0.000870114,-8.35794e-07,-4.06451e-09,0.582791,0.00086843,-8.47987e-07,2.29799e-08,0.583658,0.000866803,-7.79048e-07,-2.82503e-08,0.584524,0.00086516,-8.63799e-07,3.04167e-08,0.585388,0.000863524,-7.72548e-07,-3.38119e-08,0.586251,0.000861877,-8.73984e-07,4.52264e-08,0.587112,0.000860265,-7.38305e-07,-2.78842e-08,0.587972,0.000858705,-8.21958e-07,6.70567e-09,0.58883,0.000857081,-8.01841e-07,1.06161e-09,0.589686,0.000855481,-7.98656e-07,-1.09521e-08,0.590541,0.00085385,-8.31
512e-07,4.27468e-08,0.591394,0.000852316,-7.03272e-07,-4.08257e-08,0.592245,0.000850787,-8.25749e-07,1.34677e-09,0.593095,0.000849139,-8.21709e-07,3.54387e-08,0.593944,0.000847602,-7.15393e-07,-2.38924e-08,0.59479,0.0008461,-7.8707e-07,5.26143e-10,0.595636,0.000844527,-7.85491e-07,2.17879e-08,0.596479,0.000843021,-7.20127e-07,-2.80733e-08,0.597322,0.000841497,-8.04347e-07,3.09005e-08,0.598162,0.000839981,-7.11646e-07,-3.5924e-08,0.599002,0.00083845,-8.19418e-07,5.3191e-08,0.599839,0.000836971,-6.59845e-07,-5.76307e-08,0.600676,0.000835478,-8.32737e-07,5.81227e-08,0.60151,0.000833987,-6.58369e-07,-5.56507e-08,0.602344,0.000832503,-8.25321e-07,4.52706e-08,0.603175,0.000830988,-6.89509e-07,-6.22236e-09,0.604006,0.000829591,-7.08176e-07,-2.03811e-08,0.604834,0.000828113,-7.6932e-07,2.8142e-08,0.605662,0.000826659,-6.84894e-07,-3.25822e-08,0.606488,0.000825191,-7.8264e-07,4.25823e-08,0.607312,0.000823754,-6.54893e-07,-1.85376e-08,0.608135,0.000822389,-7.10506e-07,-2.80365e-08,0.608957,0.000820883,-7.94616e-07,7.1079e-08,0.609777,0.000819507,-5.81379e-07,-7.74655e-08,0.610596,0.000818112,-8.13775e-07,5.9969e-08,0.611413,0.000816665,-6.33868e-07,-4.32013e-08,0.612229,0.000815267,-7.63472e-07,5.32313e-08,0.613044,0.0008139,-6.03778e-07,-5.05148e-08,0.613857,0.000812541,-7.55323e-07,2.96187e-08,0.614669,0.000811119,-6.66466e-07,-8.35545e-09,0.615479,0.000809761,-6.91533e-07,3.80301e-09,0.616288,0.00080839,-6.80124e-07,-6.85666e-09,0.617096,0.000807009,-7.00694e-07,2.36237e-08,0.617903,0.000805678,-6.29822e-07,-2.80336e-08,0.618708,0.000804334,-7.13923e-07,2.8906e-08,0.619511,0.000802993,-6.27205e-07,-2.79859e-08,0.620314,0.000801655,-7.11163e-07,2.34329e-08,0.621114,0.000800303,-6.40864e-07,-6.14108e-09,0.621914,0.000799003,-6.59287e-07,1.13151e-09,0.622712,0.000797688,-6.55893e-07,1.61507e-09,0.62351,0.000796381,-6.51048e-07,-7.59186e-09,0.624305,0.000795056,-6.73823e-07,2.87524e-08,0.6251,0.000793794,-5.87566e-07,-4.7813e-08,0.625893,0.000792476,-7.31005e-07,4.32901e-08,0.626685,0.000791144,-6.01135e-07,-6.13814e-09,0.627475,0.000789923,-6.19549e-07,-1.87376e-08,0.628264,0.000788628,-6.75762e-07,2.14837e-08,0.629052,0.000787341,-6.11311e-07,-7.59265e-09,0.629839,0.000786095,-6.34089e-07,8.88692e-09,0.630625,0.000784854,-6.07428e-07,-2.7955e-08,0.631409,0.000783555,-6.91293e-07,4.33285e-08,0.632192,0.000782302,-5.61307e-07,-2.61497e-08,0.632973,0.000781101,-6.39757e-07,1.6658e-09,0.633754,0.000779827,-6.34759e-07,1.94866e-08,0.634533,0.000778616,-5.76299e-07,-2.00076e-08,0.635311,0.000777403,-6.36322e-07,9.39091e-10,0.636088,0.000776133,-6.33505e-07,1.62512e-08,0.636863,0.000774915,-5.84751e-07,-6.33937e-09,0.637638,0.000773726,-6.03769e-07,9.10609e-09,0.638411,0.000772546,-5.76451e-07,-3.00849e-08,0.639183,0.000771303,-6.66706e-07,5.1629e-08,0.639953,0.000770125,-5.11819e-07,-5.7222e-08,0.640723,0.000768929,-6.83485e-07,5.80497e-08,0.641491,0.000767736,-5.09336e-07,-5.57674e-08,0.642259,0.000766551,-6.76638e-07,4.58105e-08,0.643024,0.000765335,-5.39206e-07,-8.26541e-09,0.643789,0.000764231,-5.64002e-07,-1.27488e-08,0.644553,0.000763065,-6.02249e-07,-3.44168e-10,0.645315,0.00076186,-6.03281e-07,1.41254e-08,0.646077,0.000760695,-5.60905e-07,3.44727e-09,0.646837,0.000759584,-5.50563e-07,-2.79144e-08,0.647596,0.000758399,-6.34307e-07,4.86057e-08,0.648354,0.000757276,-4.88489e-07,-4.72989e-08,0.64911,0.000756158,-6.30386e-07,2.13807e-08,0.649866,0.000754961,-5.66244e-07,2.13808e-08,0.65062,0.000753893,-5.02102e-07,-4.7299e-08,0.651374,0.000752746,-6.43999e-07,4.86059e-08,0.652126,0.000751604,-4.98181e-
07,-2.79154e-08,0.652877,0.000750524,-5.81927e-07,3.45089e-09,0.653627,0.000749371,-5.71575e-07,1.41119e-08,0.654376,0.00074827,-5.29239e-07,-2.93748e-10,0.655123,0.00074721,-5.3012e-07,-1.29368e-08,0.65587,0.000746111,-5.68931e-07,-7.56355e-09,0.656616,0.000744951,-5.91621e-07,4.3191e-08,0.65736,0.000743897,-4.62048e-07,-4.59911e-08,0.658103,0.000742835,-6.00022e-07,2.15642e-08,0.658846,0.0007417,-5.35329e-07,1.93389e-08,0.659587,0.000740687,-4.77312e-07,-3.93152e-08,0.660327,0.000739615,-5.95258e-07,1.87126e-08,0.661066,0.00073848,-5.3912e-07,2.40695e-08,0.661804,0.000737474,-4.66912e-07,-5.53859e-08,0.662541,0.000736374,-6.33069e-07,7.82648e-08,0.663277,0.000735343,-3.98275e-07,-7.88593e-08,0.664012,0.00073431,-6.34853e-07,5.83585e-08,0.664745,0.000733215,-4.59777e-07,-3.53656e-08,0.665478,0.000732189,-5.65874e-07,2.34994e-08,0.66621,0.000731128,-4.95376e-07,9.72743e-10,0.66694,0.00073014,-4.92458e-07,-2.73903e-08,0.66767,0.000729073,-5.74629e-07,4.89839e-08,0.668398,0.000728071,-4.27677e-07,-4.93359e-08,0.669126,0.000727068,-5.75685e-07,2.91504e-08,0.669853,0.000726004,-4.88234e-07,-7.66109e-09,0.670578,0.000725004,-5.11217e-07,1.49392e-09,0.671303,0.000723986,-5.06735e-07,1.68533e-09,0.672026,0.000722978,-5.01679e-07,-8.23525e-09,0.672749,0.00072195,-5.26385e-07,3.12556e-08,0.67347,0.000720991,-4.32618e-07,-5.71825e-08,0.674191,0.000719954,-6.04166e-07,7.8265e-08,0.67491,0.00071898,-3.69371e-07,-7.70634e-08,0.675628,0.00071801,-6.00561e-07,5.11747e-08,0.676346,0.000716963,-4.47037e-07,-8.42615e-09,0.677062,0.000716044,-4.72315e-07,-1.747e-08,0.677778,0.000715046,-5.24725e-07,1.87015e-08,0.678493,0.000714053,-4.68621e-07,2.26856e-09,0.679206,0.000713123,-4.61815e-07,-2.77758e-08,0.679919,0.000712116,-5.45142e-07,4.92298e-08,0.68063,0.000711173,-3.97453e-07,-4.99339e-08,0.681341,0.000710228,-5.47255e-07,3.12967e-08,0.682051,0.000709228,-4.53365e-07,-1.56481e-08,0.68276,0.000708274,-5.00309e-07,3.12958e-08,0.683467,0.000707367,-4.06422e-07,-4.99303e-08,0.684174,0.000706405,-5.56213e-07,4.9216e-08,0.68488,0.00070544,-4.08565e-07,-2.77245e-08,0.685585,0.00070454,-4.91738e-07,2.07748e-09,0.686289,0.000703562,-4.85506e-07,1.94146e-08,0.686992,0.00070265,-4.27262e-07,-2.01314e-08,0.687695,0.000701735,-4.87656e-07,1.50616e-09,0.688396,0.000700764,-4.83137e-07,1.41067e-08,0.689096,0.00069984,-4.40817e-07,1.67168e-09,0.689795,0.000698963,-4.35802e-07,-2.07934e-08,0.690494,0.000698029,-4.98182e-07,2.18972e-08,0.691192,0.000697099,-4.32491e-07,-7.19092e-09,0.691888,0.000696212,-4.54064e-07,6.86642e-09,0.692584,0.000695325,-4.33464e-07,-2.02747e-08,0.693279,0.000694397,-4.94288e-07,1.46279e-08,0.693973,0.000693452,-4.50405e-07,2.13678e-08,0.694666,0.000692616,-3.86301e-07,-4.04945e-08,0.695358,0.000691721,-5.07785e-07,2.14009e-08,0.696049,0.00069077,-4.43582e-07,1.44955e-08,0.69674,0.000689926,-4.00096e-07,-1.97783e-08,0.697429,0.000689067,-4.5943e-07,5.01296e-09,0.698118,0.000688163,-4.44392e-07,-2.73521e-10,0.698805,0.000687273,-4.45212e-07,-3.91893e-09,0.699492,0.000686371,-4.56969e-07,1.59493e-08,0.700178,0.000685505,-4.09121e-07,-2.73351e-10,0.700863,0.000684686,-4.09941e-07,-1.4856e-08,0.701548,0.000683822,-4.54509e-07,9.25979e-11,0.702231,0.000682913,-4.54231e-07,1.44855e-08,0.702913,0.000682048,-4.10775e-07,1.56992e-09,0.703595,0.000681231,-4.06065e-07,-2.07652e-08,0.704276,0.000680357,-4.68361e-07,2.18864e-08,0.704956,0.000679486,-4.02701e-07,-7.17595e-09,0.705635,0.000678659,-4.24229e-07,6.81748e-09,0.706313,0.000677831,-4.03777e-07,-2.0094e-08,0.70699,0.000676963,-4.64059e-07,1.39538e-08,
0.707667,0.000676077,-4.22197e-07,2.38835e-08,0.708343,0.000675304,-3.50547e-07,-4.98831e-08,0.709018,0.000674453,-5.00196e-07,5.64395e-08,0.709692,0.000673622,-3.30878e-07,-5.66657e-08,0.710365,0.00067279,-5.00875e-07,5.1014e-08,0.711037,0.000671942,-3.47833e-07,-2.81809e-08,0.711709,0.000671161,-4.32376e-07,2.10513e-09,0.712379,0.000670303,-4.2606e-07,1.97604e-08,0.713049,0.00066951,-3.66779e-07,-2.15422e-08,0.713718,0.000668712,-4.31406e-07,6.8038e-09,0.714387,0.000667869,-4.10994e-07,-5.67295e-09,0.715054,0.00066703,-4.28013e-07,1.5888e-08,0.715721,0.000666222,-3.80349e-07,1.72576e-09,0.716387,0.000665467,-3.75172e-07,-2.27911e-08,0.717052,0.000664648,-4.43545e-07,2.9834e-08,0.717716,0.00066385,-3.54043e-07,-3.69401e-08,0.718379,0.000663031,-4.64864e-07,5.83219e-08,0.719042,0.000662277,-2.89898e-07,-7.71382e-08,0.719704,0.000661465,-5.21313e-07,7.14171e-08,0.720365,0.000660637,-3.07061e-07,-2.97161e-08,0.721025,0.000659934,-3.96209e-07,-1.21575e-08,0.721685,0.000659105,-4.32682e-07,1.87412e-08,0.722343,0.000658296,-3.76458e-07,-3.2029e-09,0.723001,0.000657533,-3.86067e-07,-5.9296e-09,0.723659,0.000656743,-4.03856e-07,2.69213e-08,0.724315,0.000656016,-3.23092e-07,-4.21511e-08,0.724971,0.000655244,-4.49545e-07,2.24737e-08,0.725625,0.000654412,-3.82124e-07,1.18611e-08,0.726279,0.000653683,-3.46541e-07,-1.03132e-08,0.726933,0.000652959,-3.7748e-07,-3.02128e-08,0.727585,0.000652114,-4.68119e-07,7.15597e-08,0.728237,0.000651392,-2.5344e-07,-7.72119e-08,0.728888,0.000650654,-4.85075e-07,5.8474e-08,0.729538,0.000649859,-3.09654e-07,-3.74746e-08,0.730188,0.000649127,-4.22077e-07,3.18197e-08,0.730837,0.000648379,-3.26618e-07,-3.01997e-08,0.731485,0.000647635,-4.17217e-07,2.93747e-08,0.732132,0.000646888,-3.29093e-07,-2.76943e-08,0.732778,0.000646147,-4.12176e-07,2.17979e-08,0.733424,0.000645388,-3.46783e-07,1.07292e-10,0.734069,0.000644695,-3.46461e-07,-2.22271e-08,0.734713,0.000643935,-4.13142e-07,2.91963e-08,0.735357,0.000643197,-3.25553e-07,-3.49536e-08,0.736,0.000642441,-4.30414e-07,5.10133e-08,0.736642,0.000641733,-2.77374e-07,-4.98904e-08,0.737283,0.000641028,-4.27045e-07,2.93392e-08,0.737924,0.000640262,-3.39028e-07,-7.86156e-09,0.738564,0.000639561,-3.62612e-07,2.10703e-09,0.739203,0.000638842,-3.56291e-07,-5.6653e-10,0.739842,0.000638128,-3.57991e-07,1.59086e-10,0.740479,0.000637412,-3.57513e-07,-6.98321e-11,0.741116,0.000636697,-3.57723e-07,1.20214e-10,0.741753,0.000635982,-3.57362e-07,-4.10987e-10,0.742388,0.000635266,-3.58595e-07,1.5237e-09,0.743023,0.000634553,-3.54024e-07,-5.68376e-09,0.743657,0.000633828,-3.71075e-07,2.12113e-08,0.744291,0.00063315,-3.07441e-07,-1.95569e-08,0.744924,0.000632476,-3.66112e-07,-2.58816e-09,0.745556,0.000631736,-3.73877e-07,2.99096e-08,0.746187,0.000631078,-2.84148e-07,-5.74454e-08,0.746818,0.000630337,-4.56484e-07,8.06629e-08,0.747448,0.000629666,-2.14496e-07,-8.63922e-08,0.748077,0.000628978,-4.73672e-07,8.60918e-08,0.748706,0.000628289,-2.15397e-07,-7.91613e-08,0.749334,0.000627621,-4.5288e-07,5.17393e-08,0.749961,0.00062687,-2.97663e-07,-8.58662e-09,0.750588,0.000626249,-3.23422e-07,-1.73928e-08,0.751214,0.00062555,-3.75601e-07,1.85532e-08,0.751839,0.000624855,-3.19941e-07,2.78479e-09,0.752463,0.000624223,-3.11587e-07,-2.96923e-08,0.753087,0.000623511,-4.00664e-07,5.63799e-08,0.75371,0.000622879,-2.31524e-07,-7.66179e-08,0.754333,0.000622186,-4.61378e-07,7.12778e-08,0.754955,0.000621477,-2.47545e-07,-2.96794e-08,0.755576,0.000620893,-3.36583e-07,-1.21648e-08,0.756196,0.000620183,-3.73077e-07,1.87339e-08,0.756816,0.000619493,-3.16875e-07,-3.16622e-0
9,0.757435,0.00061885,-3.26374e-07,-6.0691e-09,0.758054,0.000618179,-3.44581e-07,2.74426e-08,0.758672,0.000617572,-2.62254e-07,-4.40968e-08,0.759289,0.000616915,-3.94544e-07,2.97352e-08,0.759906,0.000616215,-3.05338e-07,-1.52393e-08,0.760522,0.000615559,-3.51056e-07,3.12221e-08,0.761137,0.000614951,-2.5739e-07,-5.00443e-08,0.761751,0.000614286,-4.07523e-07,4.9746e-08,0.762365,0.00061362,-2.58285e-07,-2.97303e-08,0.762979,0.000613014,-3.47476e-07,9.57079e-09,0.763591,0.000612348,-3.18764e-07,-8.55287e-09,0.764203,0.000611685,-3.44422e-07,2.46407e-08,0.764815,0.00061107,-2.705e-07,-3.04053e-08,0.765426,0.000610437,-3.61716e-07,3.73759e-08,0.766036,0.000609826,-2.49589e-07,-5.94935e-08,0.766645,0.000609149,-4.28069e-07,8.13889e-08,0.767254,0.000608537,-1.83902e-07,-8.72483e-08,0.767862,0.000607907,-4.45647e-07,8.87901e-08,0.76847,0.000607282,-1.79277e-07,-8.90983e-08,0.769077,0.000606656,-4.46572e-07,8.87892e-08,0.769683,0.000606029,-1.80204e-07,-8.72446e-08,0.770289,0.000605407,-4.41938e-07,8.13752e-08,0.770894,0.000604768,-1.97812e-07,-5.94423e-08,0.771498,0.000604194,-3.76139e-07,3.71848e-08,0.772102,0.000603553,-2.64585e-07,-2.96922e-08,0.772705,0.000602935,-3.53661e-07,2.19793e-08,0.773308,0.000602293,-2.87723e-07,1.37955e-09,0.77391,0.000601722,-2.83585e-07,-2.74976e-08,0.774512,0.000601072,-3.66077e-07,4.9006e-08,0.775112,0.000600487,-2.19059e-07,-4.93171e-08,0.775712,0.000599901,-3.67011e-07,2.90531e-08,0.776312,0.000599254,-2.79851e-07,-7.29081e-09,0.776911,0.000598673,-3.01724e-07,1.10077e-10,0.777509,0.00059807,-3.01393e-07,6.85053e-09,0.778107,0.000597487,-2.80842e-07,-2.75123e-08,0.778704,0.000596843,-3.63379e-07,4.35939e-08,0.779301,0.000596247,-2.32597e-07,-2.7654e-08,0.779897,0.000595699,-3.15559e-07,7.41741e-09,0.780492,0.00059509,-2.93307e-07,-2.01562e-09,0.781087,0.000594497,-2.99354e-07,6.45059e-10,0.781681,0.000593901,-2.97418e-07,-5.64635e-10,0.782275,0.000593304,-2.99112e-07,1.61347e-09,0.782868,0.000592711,-2.94272e-07,-5.88926e-09,0.78346,0.000592105,-3.1194e-07,2.19436e-08,0.784052,0.000591546,-2.46109e-07,-2.22805e-08,0.784643,0.000590987,-3.1295e-07,7.57368e-09,0.785234,0.000590384,-2.90229e-07,-8.01428e-09,0.785824,0.00058978,-3.14272e-07,2.44834e-08,0.786414,0.000589225,-2.40822e-07,-3.03148e-08,0.787003,0.000588652,-3.31766e-07,3.7171e-08,0.787591,0.0005881,-2.20253e-07,-5.87646e-08,0.788179,0.000587483,-3.96547e-07,7.86782e-08,0.788766,0.000586926,-1.60512e-07,-7.71342e-08,0.789353,0.000586374,-3.91915e-07,5.10444e-08,0.789939,0.000585743,-2.38782e-07,-7.83422e-09,0.790524,0.000585242,-2.62284e-07,-1.97076e-08,0.791109,0.000584658,-3.21407e-07,2.70598e-08,0.791693,0.000584097,-2.40228e-07,-2.89269e-08,0.792277,0.000583529,-3.27008e-07,2.90431e-08,0.792861,0.000582963,-2.39879e-07,-2.76409e-08,0.793443,0.0005824,-3.22802e-07,2.1916e-08,0.794025,0.00058182,-2.57054e-07,-4.18368e-10,0.794607,0.000581305,-2.58309e-07,-2.02425e-08,0.795188,0.000580727,-3.19036e-07,2.17838e-08,0.795768,0.000580155,-2.53685e-07,-7.28814e-09,0.796348,0.000579625,-2.75549e-07,7.36871e-09,0.796928,0.000579096,-2.53443e-07,-2.21867e-08,0.797506,0.000578523,-3.20003e-07,2.17736e-08,0.798085,0.000577948,-2.54683e-07,-5.30296e-09,0.798662,0.000577423,-2.70592e-07,-5.61698e-10,0.799239,0.00057688,-2.72277e-07,7.54977e-09,0.799816,0.000576358,-2.49627e-07,-2.96374e-08,0.800392,0.00057577,-3.38539e-07,5.1395e-08,0.800968,0.000575247,-1.84354e-07,-5.67335e-08,0.801543,0.000574708,-3.54555e-07,5.63297e-08,0.802117,0.000574168,-1.85566e-07,-4.93759e-08,0.802691,0.000573649,-3.33693e-07,2.19646e-08,
0.803264,0.000573047,-2.678e-07,2.1122e-08,0.803837,0.000572575,-2.04433e-07,-4.68482e-08,0.804409,0.000572026,-3.44978e-07,4.70613e-08,0.804981,0.000571477,-2.03794e-07,-2.21877e-08,0.805552,0.000571003,-2.70357e-07,-1.79153e-08,0.806123,0.000570408,-3.24103e-07,3.42443e-08,0.806693,0.000569863,-2.2137e-07,1.47556e-10,0.807263,0.000569421,-2.20928e-07,-3.48345e-08,0.807832,0.000568874,-3.25431e-07,1.99812e-08,0.808401,0.000568283,-2.65487e-07,1.45143e-08,0.808969,0.000567796,-2.21945e-07,-1.84338e-08,0.809536,0.000567297,-2.77246e-07,-3.83608e-10,0.810103,0.000566741,-2.78397e-07,1.99683e-08,0.81067,0.000566244,-2.18492e-07,-1.98848e-08,0.811236,0.000565747,-2.78146e-07,-3.38976e-11,0.811801,0.000565191,-2.78248e-07,2.00204e-08,0.812366,0.000564695,-2.18187e-07,-2.04429e-08,0.812931,0.000564197,-2.79516e-07,2.1467e-09,0.813495,0.000563644,-2.73076e-07,1.18561e-08,0.814058,0.000563134,-2.37507e-07,1.00334e-08,0.814621,0.000562689,-2.07407e-07,-5.19898e-08,0.815183,0.000562118,-3.63376e-07,7.87163e-08,0.815745,0.000561627,-1.27227e-07,-8.40616e-08,0.816306,0.000561121,-3.79412e-07,7.87163e-08,0.816867,0.000560598,-1.43263e-07,-5.19898e-08,0.817428,0.000560156,-2.99233e-07,1.00335e-08,0.817988,0.000559587,-2.69132e-07,1.18559e-08,0.818547,0.000559085,-2.33564e-07,2.14764e-09,0.819106,0.000558624,-2.27122e-07,-2.04464e-08,0.819664,0.000558108,-2.88461e-07,2.00334e-08,0.820222,0.000557591,-2.28361e-07,-8.24277e-11,0.820779,0.000557135,-2.28608e-07,-1.97037e-08,0.821336,0.000556618,-2.87719e-07,1.92925e-08,0.821893,0.000556101,-2.29841e-07,2.13831e-09,0.822448,0.000555647,-2.23427e-07,-2.78458e-08,0.823004,0.000555117,-3.06964e-07,4.96402e-08,0.823559,0.000554652,-1.58043e-07,-5.15058e-08,0.824113,0.000554181,-3.12561e-07,3.71737e-08,0.824667,0.000553668,-2.0104e-07,-3.75844e-08,0.82522,0.000553153,-3.13793e-07,5.35592e-08,0.825773,0.000552686,-1.53115e-07,-5.74431e-08,0.826326,0.000552207,-3.25444e-07,5.7004e-08,0.826878,0.000551728,-1.54433e-07,-5.13635e-08,0.827429,0.000551265,-3.08523e-07,2.92406e-08,0.82798,0.000550735,-2.20801e-07,-5.99424e-09,0.828531,0.000550276,-2.38784e-07,-5.26363e-09,0.829081,0.000549782,-2.54575e-07,2.70488e-08,0.82963,0.000549354,-1.73429e-07,-4.33268e-08,0.83018,0.000548878,-3.03409e-07,2.7049e-08,0.830728,0.000548352,-2.22262e-07,-5.26461e-09,0.831276,0.000547892,-2.38056e-07,-5.99057e-09,0.831824,0.000547397,-2.56027e-07,2.92269e-08,0.832371,0.000546973,-1.68347e-07,-5.13125e-08,0.832918,0.000546482,-3.22284e-07,5.68139e-08,0.833464,0.000546008,-1.51843e-07,-5.67336e-08,0.83401,0.000545534,-3.22043e-07,5.09113e-08,0.834555,0.000545043,-1.6931e-07,-2.77022e-08,0.8351,0.000544621,-2.52416e-07,2.92924e-10,0.835644,0.000544117,-2.51537e-07,2.65305e-08,0.836188,0.000543694,-1.71946e-07,-4.68105e-08,0.836732,0.00054321,-3.12377e-07,4.15021e-08,0.837275,0.000542709,-1.87871e-07,1.13355e-11,0.837817,0.000542334,-1.87837e-07,-4.15474e-08,0.838359,0.000541833,-3.12479e-07,4.69691e-08,0.838901,0.000541349,-1.71572e-07,-2.71196e-08,0.839442,0.000540925,-2.52931e-07,1.90462e-09,0.839983,0.000540425,-2.47217e-07,1.95011e-08,0.840523,0.000539989,-1.88713e-07,-2.03045e-08,0.841063,0.00053955,-2.49627e-07,2.11216e-09,0.841602,0.000539057,-2.4329e-07,1.18558e-08,0.842141,0.000538606,-2.07723e-07,1.00691e-08,0.842679,0.000538221,-1.77516e-07,-5.21324e-08,0.843217,0.00053771,-3.33913e-07,7.92513e-08,0.843755,0.00053728,-9.6159e-08,-8.60587e-08,0.844292,0.000536829,-3.54335e-07,8.61696e-08,0.844828,0.000536379,-9.58263e-08,-7.98057e-08,0.845364,0.000535948,-3.35243e-07,5.42394e-08,0
.8459,0.00053544,-1.72525e-07,-1.79426e-08,0.846435,0.000535041,-2.26353e-07,1.75308e-08,0.84697,0.000534641,-1.73761e-07,-5.21806e-08,0.847505,0.000534137,-3.30302e-07,7.19824e-08,0.848038,0.000533692,-1.14355e-07,-5.69349e-08,0.848572,0.000533293,-2.8516e-07,3.65479e-08,0.849105,0.000532832,-1.75516e-07,-2.96519e-08,0.849638,0.000532392,-2.64472e-07,2.2455e-08,0.85017,0.000531931,-1.97107e-07,-5.63451e-10,0.850702,0.000531535,-1.98797e-07,-2.02011e-08,0.851233,0.000531077,-2.59401e-07,2.17634e-08,0.851764,0.000530623,-1.94111e-07,-7.24794e-09,0.852294,0.000530213,-2.15854e-07,7.22832e-09,0.852824,0.000529803,-1.94169e-07,-2.16653e-08,0.853354,0.00052935,-2.59165e-07,1.98283e-08,0.853883,0.000528891,-1.9968e-07,1.95678e-09,0.854412,0.000528497,-1.9381e-07,-2.76554e-08,0.85494,0.000528027,-2.76776e-07,4.90603e-08,0.855468,0.00052762,-1.29596e-07,-4.93764e-08,0.855995,0.000527213,-2.77725e-07,2.92361e-08,0.856522,0.000526745,-1.90016e-07,-7.96341e-09,0.857049,0.000526341,-2.13907e-07,2.61752e-09,0.857575,0.000525922,-2.06054e-07,-2.50665e-09,0.8581,0.000525502,-2.13574e-07,7.40906e-09,0.858626,0.000525097,-1.91347e-07,-2.71296e-08,0.859151,0.000524633,-2.72736e-07,4.15048e-08,0.859675,0.000524212,-1.48221e-07,-1.96802e-08,0.860199,0.000523856,-2.07262e-07,-2.23886e-08,0.860723,0.000523375,-2.74428e-07,4.96299e-08,0.861246,0.000522975,-1.25538e-07,-5.69216e-08,0.861769,0.000522553,-2.96303e-07,5.88473e-08,0.862291,0.000522137,-1.19761e-07,-5.92584e-08,0.862813,0.00052172,-2.97536e-07,5.8977e-08,0.863334,0.000521301,-1.20605e-07,-5.74403e-08,0.863855,0.000520888,-2.92926e-07,5.15751e-08,0.864376,0.000520457,-1.38201e-07,-2.96506e-08,0.864896,0.000520091,-2.27153e-07,7.42277e-09,0.865416,0.000519659,-2.04885e-07,-4.05057e-11,0.865936,0.00051925,-2.05006e-07,-7.26074e-09,0.866455,0.000518818,-2.26788e-07,2.90835e-08,0.866973,0.000518451,-1.39538e-07,-4.94686e-08,0.867492,0.000518024,-2.87944e-07,4.95814e-08,0.868009,0.000517597,-1.39199e-07,-2.96479e-08,0.868527,0.000517229,-2.28143e-07,9.40539e-09,0.869044,0.000516801,-1.99927e-07,-7.9737e-09,0.86956,0.000516378,-2.23848e-07,2.24894e-08,0.870077,0.000515997,-1.5638e-07,-2.23793e-08,0.870592,0.000515617,-2.23517e-07,7.42302e-09,0.871108,0.000515193,-2.01248e-07,-7.31283e-09,0.871623,0.000514768,-2.23187e-07,2.18283e-08,0.872137,0.000514387,-1.57702e-07,-2.03959e-08,0.872652,0.000514011,-2.1889e-07,1.50711e-10,0.873165,0.000513573,-2.18437e-07,1.97931e-08,0.873679,0.000513196,-1.59058e-07,-1.97183e-08,0.874192,0.000512819,-2.18213e-07,-5.24324e-10,0.874704,0.000512381,-2.19786e-07,2.18156e-08,0.875217,0.000512007,-1.54339e-07,-2.71336e-08,0.875728,0.000511616,-2.3574e-07,2.71141e-08,0.87624,0.000511226,-1.54398e-07,-2.17182e-08,0.876751,0.000510852,-2.19552e-07,1.54131e-10,0.877262,0.000510414,-2.1909e-07,2.11017e-08,0.877772,0.000510039,-1.55785e-07,-2.49562e-08,0.878282,0.000509652,-2.30654e-07,1.91183e-08,0.878791,0.000509248,-1.73299e-07,8.08751e-09,0.8793,0.000508926,-1.49036e-07,-5.14684e-08,0.879809,0.000508474,-3.03441e-07,7.85766e-08,0.880317,0.000508103,-6.77112e-08,-8.40242e-08,0.880825,0.000507715,-3.19784e-07,7.87063e-08,0.881333,0.000507312,-8.36649e-08,-5.19871e-08,0.88184,0.000506988,-2.39626e-07,1.00327e-08,0.882346,0.000506539,-2.09528e-07,1.18562e-08,0.882853,0.000506156,-1.73959e-07,2.14703e-09,0.883359,0.000505814,-1.67518e-07,-2.04444e-08,0.883864,0.000505418,-2.28851e-07,2.00258e-08,0.88437,0.00050502,-1.68774e-07,-5.42855e-11,0.884874,0.000504682,-1.68937e-07,-1.98087e-08,0.885379,0.000504285,-2.28363e-07,1.96842e-08,0.885
883,0.000503887,-1.6931e-07,6.76342e-10,0.886387,0.000503551,-1.67281e-07,-2.23896e-08,0.88689,0.000503149,-2.3445e-07,2.92774e-08,0.887393,0.000502768,-1.46618e-07,-3.51152e-08,0.887896,0.00050237,-2.51963e-07,5.15787e-08,0.888398,0.00050202,-9.72271e-08,-5.19903e-08,0.8889,0.00050167,-2.53198e-07,3.71732e-08,0.889401,0.000501275,-1.41678e-07,-3.70978e-08,0.889902,0.00050088,-2.52972e-07,5.16132e-08,0.890403,0.000500529,-9.81321e-08,-5.01459e-08,0.890903,0.000500183,-2.4857e-07,2.9761e-08,0.891403,0.000499775,-1.59287e-07,-9.29351e-09,0.891903,0.000499428,-1.87167e-07,7.41301e-09,0.892402,0.000499076,-1.64928e-07,-2.03585e-08,0.892901,0.000498685,-2.26004e-07,1.44165e-08,0.893399,0.000498276,-1.82754e-07,2.22974e-08,0.893898,0.000497978,-1.15862e-07,-4.40013e-08,0.894395,0.000497614,-2.47866e-07,3.44985e-08,0.894893,0.000497222,-1.44371e-07,-3.43882e-08,0.89539,0.00049683,-2.47535e-07,4.34497e-08,0.895886,0.000496465,-1.17186e-07,-2.02012e-08,0.896383,0.00049617,-1.7779e-07,-2.22497e-08,0.896879,0.000495748,-2.44539e-07,4.95952e-08,0.897374,0.000495408,-9.57532e-08,-5.69217e-08,0.89787,0.000495045,-2.66518e-07,5.88823e-08,0.898364,0.000494689,-8.98713e-08,-5.93983e-08,0.898859,0.000494331,-2.68066e-07,5.95017e-08,0.899353,0.000493973,-8.95613e-08,-5.9399e-08,0.899847,0.000493616,-2.67758e-07,5.8885e-08,0.90034,0.000493257,-9.11033e-08,-5.69317e-08,0.900833,0.000492904,-2.61898e-07,4.96326e-08,0.901326,0.000492529,-1.13001e-07,-2.23893e-08,0.901819,0.000492236,-1.80169e-07,-1.968e-08,0.902311,0.000491817,-2.39209e-07,4.15047e-08,0.902802,0.000491463,-1.14694e-07,-2.71296e-08,0.903293,0.000491152,-1.96083e-07,7.409e-09,0.903784,0.000490782,-1.73856e-07,-2.50645e-09,0.904275,0.000490427,-1.81376e-07,2.61679e-09,0.904765,0.000490072,-1.73525e-07,-7.96072e-09,0.905255,0.000489701,-1.97407e-07,2.92261e-08,0.905745,0.000489394,-1.09729e-07,-4.93389e-08,0.906234,0.000489027,-2.57746e-07,4.89204e-08,0.906723,0.000488658,-1.10985e-07,-2.71333e-08,0.907211,0.000488354,-1.92385e-07,8.30861e-12,0.907699,0.00048797,-1.9236e-07,2.71001e-08,0.908187,0.000487666,-1.1106e-07,-4.88041e-08,0.908675,0.000487298,-2.57472e-07,4.89069e-08,0.909162,0.000486929,-1.10751e-07,-2.76143e-08,0.909649,0.000486625,-1.93594e-07,1.9457e-09,0.910135,0.000486244,-1.87757e-07,1.98315e-08,0.910621,0.000485928,-1.28262e-07,-2.16671e-08,0.911107,0.000485606,-1.93264e-07,7.23216e-09,0.911592,0.000485241,-1.71567e-07,-7.26152e-09,0.912077,0.000484877,-1.93352e-07,2.18139e-08,0.912562,0.000484555,-1.2791e-07,-2.03895e-08,0.913047,0.000484238,-1.89078e-07,1.39494e-10,0.913531,0.000483861,-1.8866e-07,1.98315e-08,0.914014,0.000483543,-1.29165e-07,-1.98609e-08,0.914498,0.000483225,-1.88748e-07,7.39912e-12,0.914981,0.000482847,-1.88726e-07,1.98313e-08,0.915463,0.000482529,-1.29232e-07,-1.9728e-08,0.915946,0.000482212,-1.88416e-07,-5.24035e-10,0.916428,0.000481833,-1.89988e-07,2.18241e-08,0.916909,0.000481519,-1.24516e-07,-2.71679e-08,0.917391,0.000481188,-2.06019e-07,2.72427e-08,0.917872,0.000480858,-1.24291e-07,-2.21985e-08,0.918353,0.000480543,-1.90886e-07,1.94644e-09,0.918833,0.000480167,-1.85047e-07,1.44127e-08,0.919313,0.00047984,-1.41809e-07,7.39438e-12,0.919793,0.000479556,-1.41787e-07,-1.44423e-08,0.920272,0.000479229,-1.85114e-07,-1.84291e-09,0.920751,0.000478854,-1.90642e-07,2.18139e-08,0.92123,0.000478538,-1.25201e-07,-2.58081e-08,0.921708,0.00047821,-2.02625e-07,2.18139e-08,0.922186,0.00047787,-1.37183e-07,-1.84291e-09,0.922664,0.00047759,-1.42712e-07,-1.44423e-08,0.923141,0.000477262,-1.86039e-07,7.34701e-12,0.923618,0.00047
689,-1.86017e-07,1.44129e-08,0.924095,0.000476561,-1.42778e-07,1.94572e-09,0.924572,0.000476281,-1.36941e-07,-2.21958e-08,0.925048,0.000475941,-2.03528e-07,2.72327e-08,0.925523,0.000475615,-1.2183e-07,-2.71304e-08,0.925999,0.00047529,-2.03221e-07,2.16843e-08,0.926474,0.000474949,-1.38168e-07,-2.16005e-12,0.926949,0.000474672,-1.38175e-07,-2.16756e-08,0.927423,0.000474331,-2.03202e-07,2.71001e-08,0.927897,0.000474006,-1.21902e-07,-2.71201e-08,0.928371,0.000473681,-2.03262e-07,2.17757e-08,0.928845,0.00047334,-1.37935e-07,-3.78028e-10,0.929318,0.000473063,-1.39069e-07,-2.02636e-08,0.929791,0.000472724,-1.9986e-07,2.18276e-08,0.930263,0.000472389,-1.34377e-07,-7.44231e-09,0.930736,0.000472098,-1.56704e-07,7.94165e-09,0.931208,0.000471809,-1.32879e-07,-2.43243e-08,0.931679,0.00047147,-2.05851e-07,2.97508e-08,0.932151,0.000471148,-1.16599e-07,-3.50742e-08,0.932622,0.000470809,-2.21822e-07,5.09414e-08,0.933092,0.000470518,-6.89976e-08,-4.94821e-08,0.933563,0.000470232,-2.17444e-07,2.77775e-08,0.934033,0.00046988,-1.34111e-07,-2.02351e-09,0.934502,0.000469606,-1.40182e-07,-1.96835e-08,0.934972,0.000469267,-1.99232e-07,2.11529e-08,0.935441,0.000468932,-1.35774e-07,-5.32332e-09,0.93591,0.000468644,-1.51743e-07,1.40413e-10,0.936378,0.000468341,-1.51322e-07,4.76166e-09,0.936846,0.000468053,-1.37037e-07,-1.9187e-08,0.937314,0.000467721,-1.94598e-07,1.23819e-08,0.937782,0.000467369,-1.57453e-07,2.92642e-08,0.938249,0.000467142,-6.96601e-08,-6.98342e-08,0.938716,0.000466793,-2.79163e-07,7.12586e-08,0.939183,0.000466449,-6.53869e-08,-3.63863e-08,0.939649,0.000466209,-1.74546e-07,1.46818e-08,0.940115,0.000465904,-1.305e-07,-2.2341e-08,0.940581,0.000465576,-1.97523e-07,1.50774e-08,0.941046,0.000465226,-1.52291e-07,2.16359e-08,0.941511,0.000464986,-8.73832e-08,-4.20162e-08,0.941976,0.000464685,-2.13432e-07,2.72198e-08,0.942441,0.00046434,-1.31773e-07,-7.2581e-09,0.942905,0.000464055,-1.53547e-07,1.81263e-09,0.943369,0.000463753,-1.48109e-07,7.58386e-12,0.943832,0.000463457,-1.48086e-07,-1.84298e-09,0.944296,0.000463155,-1.53615e-07,7.36433e-09,0.944759,0.00046287,-1.31522e-07,-2.76143e-08,0.945221,0.000462524,-2.14365e-07,4.34883e-08,0.945684,0.000462226,-8.39003e-08,-2.71297e-08,0.946146,0.000461977,-1.65289e-07,5.42595e-09,0.946608,0.000461662,-1.49012e-07,5.42593e-09,0.947069,0.000461381,-1.32734e-07,-2.71297e-08,0.94753,0.000461034,-2.14123e-07,4.34881e-08,0.947991,0.000460736,-8.36585e-08,-2.76134e-08,0.948452,0.000460486,-1.66499e-07,7.36083e-09,0.948912,0.000460175,-1.44416e-07,-1.82993e-09,0.949372,0.000459881,-1.49906e-07,-4.11073e-11,0.949832,0.000459581,-1.50029e-07,1.99434e-09,0.950291,0.000459287,-1.44046e-07,-7.93627e-09,0.950751,0.000458975,-1.67855e-07,2.97507e-08,0.951209,0.000458728,-7.86029e-08,-5.1462e-08,0.951668,0.000458417,-2.32989e-07,5.6888e-08,0.952126,0.000458121,-6.2325e-08,-5.68806e-08,0.952584,0.000457826,-2.32967e-07,5.14251e-08,0.953042,0.000457514,-7.86914e-08,-2.96107e-08,0.953499,0.000457268,-1.67523e-07,7.41296e-09,0.953956,0.000456955,-1.45285e-07,-4.11262e-11,0.954413,0.000456665,-1.45408e-07,-7.24847e-09,0.95487,0.000456352,-1.67153e-07,2.9035e-08,0.955326,0.000456105,-8.00484e-08,-4.92869e-08,0.955782,0.000455797,-2.27909e-07,4.89032e-08,0.956238,0.000455488,-8.11994e-08,-2.71166e-08,0.956693,0.000455244,-1.62549e-07,-4.13678e-11,0.957148,0.000454919,-1.62673e-07,2.72821e-08,0.957603,0.000454675,-8.0827e-08,-4.94824e-08,0.958057,0.000454365,-2.29274e-07,5.14382e-08,0.958512,0.000454061,-7.49597e-08,-3.7061e-08,0.958965,0.0004538,-1.86143e-07,3.72013e-08,0.959419,0.00045
3539,-7.45389e-08,-5.21396e-08,0.959873,0.000453234,-2.30958e-07,5.21476e-08,0.960326,0.000452928,-7.45146e-08,-3.72416e-08,0.960778,0.000452667,-1.8624e-07,3.72143e-08,0.961231,0.000452407,-7.45967e-08,-5.20109e-08,0.961683,0.000452101,-2.30629e-07,5.16199e-08,0.962135,0.000451795,-7.57696e-08,-3.52595e-08,0.962587,0.000451538,-1.81548e-07,2.98133e-08,0.963038,0.000451264,-9.2108e-08,-2.43892e-08,0.963489,0.000451007,-1.65276e-07,8.13892e-09,0.96394,0.000450701,-1.40859e-07,-8.16647e-09,0.964391,0.000450394,-1.65358e-07,2.45269e-08,0.964841,0.000450137,-9.17775e-08,-3.03367e-08,0.965291,0.000449863,-1.82787e-07,3.7215e-08,0.965741,0.000449609,-7.11424e-08,-5.89188e-08,0.96619,0.00044929,-2.47899e-07,7.92509e-08,0.966639,0.000449032,-1.01462e-08,-7.92707e-08,0.967088,0.000448773,-2.47958e-07,5.90181e-08,0.967537,0.000448455,-7.0904e-08,-3.75925e-08,0.967985,0.0004482,-1.83681e-07,3.17471e-08,0.968433,0.000447928,-8.84401e-08,-2.97913e-08,0.968881,0.000447662,-1.77814e-07,2.78133e-08,0.969329,0.000447389,-9.4374e-08,-2.18572e-08,0.969776,0.000447135,-1.59946e-07,1.10134e-11,0.970223,0.000446815,-1.59913e-07,2.18132e-08,0.97067,0.000446561,-9.44732e-08,-2.76591e-08,0.971116,0.000446289,-1.7745e-07,2.92185e-08,0.971562,0.000446022,-8.97948e-08,-2.96104e-08,0.972008,0.000445753,-1.78626e-07,2.96185e-08,0.972454,0.000445485,-8.97706e-08,-2.92588e-08,0.972899,0.000445218,-1.77547e-07,2.78123e-08,0.973344,0.000444946,-9.41103e-08,-2.23856e-08,0.973789,0.000444691,-1.61267e-07,2.12559e-09,0.974233,0.000444374,-1.5489e-07,1.38833e-08,0.974678,0.000444106,-1.13241e-07,1.94591e-09,0.975122,0.000443886,-1.07403e-07,-2.16669e-08,0.975565,0.000443606,-1.72404e-07,2.5117e-08,0.976009,0.000443336,-9.70526e-08,-1.91963e-08,0.976452,0.000443085,-1.54642e-07,-7.93627e-09,0.976895,0.000442752,-1.7845e-07,5.09414e-08,0.977338,0.000442548,-2.56262e-08,-7.66201e-08,0.97778,0.000442266,-2.55486e-07,7.67249e-08,0.978222,0.000441986,-2.53118e-08,-5.14655e-08,0.978664,0.000441781,-1.79708e-07,9.92773e-09,0.979106,0.000441451,-1.49925e-07,1.17546e-08,0.979547,0.000441186,-1.14661e-07,2.65868e-09,0.979988,0.000440965,-1.06685e-07,-2.23893e-08,0.980429,0.000440684,-1.73853e-07,2.72939e-08,0.980869,0.000440419,-9.19716e-08,-2.71816e-08,0.98131,0.000440153,-1.73516e-07,2.18278e-08,0.98175,0.000439872,-1.08033e-07,-5.24833e-10,0.982189,0.000439654,-1.09607e-07,-1.97284e-08,0.982629,0.000439376,-1.68793e-07,1.98339e-08,0.983068,0.000439097,-1.09291e-07,-2.62901e-12,0.983507,0.000438879,-1.09299e-07,-1.98234e-08,0.983946,0.000438601,-1.68769e-07,1.96916e-08,0.984384,0.000438322,-1.09694e-07,6.6157e-10,0.984823,0.000438105,-1.0771e-07,-2.23379e-08,0.985261,0.000437823,-1.74723e-07,2.90855e-08,0.985698,0.00043756,-8.74669e-08,-3.43992e-08,0.986136,0.000437282,-1.90665e-07,4.89068e-08,0.986573,0.000437048,-4.39442e-08,-4.20188e-08,0.98701,0.000436834,-1.7e-07,-4.11073e-11,0.987446,0.000436494,-1.70124e-07,4.21832e-08,0.987883,0.00043628,-4.35742e-08,-4.94824e-08,0.988319,0.000436044,-1.92021e-07,3.6537e-08,0.988755,0.00043577,-8.24102e-08,-3.70611e-08,0.989191,0.000435494,-1.93593e-07,5.21026e-08,0.989626,0.000435263,-3.72855e-08,-5.21402e-08,0.990061,0.000435032,-1.93706e-07,3.7249e-08,0.990496,0.000434756,-8.19592e-08,-3.72512e-08,0.990931,0.000434481,-1.93713e-07,5.21511e-08,0.991365,0.00043425,-3.72595e-08,-5.21439e-08,0.991799,0.000434019,-1.93691e-07,3.72152e-08,0.992233,0.000433743,-8.20456e-08,-3.71123e-08,0.992667,0.000433468,-1.93382e-07,5.16292e-08,0.9931,0.000433236,-3.84947e-08,-5.01953e-08,0.993533,0.000433008,-1.
89081e-07,2.99427e-08,0.993966,0.00043272,-9.92525e-08,-9.9708e-09,0.994399,0.000432491,-1.29165e-07,9.94051e-09,0.994831,0.000432263,-9.93434e-08,-2.97912e-08,0.995263,0.000431975,-1.88717e-07,4.96198e-08,0.995695,0.000431746,-3.98578e-08,-4.94785e-08,0.996127,0.000431518,-1.88293e-07,2.9085e-08,0.996558,0.000431229,-1.01038e-07,-7.25675e-09,0.996989,0.000431005,-1.22809e-07,-5.79945e-11,0.99742,0.000430759,-1.22983e-07,7.48873e-09,0.997851,0.000430536,-1.00516e-07,-2.98969e-08,0.998281,0.000430245,-1.90207e-07,5.24942e-08,0.998711,0.000430022,-3.27246e-08,-6.08706e-08,0.999141,0.000429774,-2.15336e-07,7.17788e-08,0.999571,0.000429392,0.,0.};\n\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void Lab2RGBConvert_f(const T& src, D& dst)\n        {\n            const float lThresh = 0.008856f * 903.3f;\n            const float fThresh = 7.787f * 0.008856f + 16.0f / 116.0f;\n\n            float Y, fy;\n\n            if (src.x <= lThresh)\n            {\n                Y = src.x / 903.3f;\n                fy = 7.787f * Y + 16.0f / 116.0f;\n            }\n            else\n            {\n                fy = (src.x + 16.0f) / 116.0f;\n                Y = fy * fy * fy;\n            }\n\n            float X = src.y / 500.0f + fy;\n            float Z = fy - src.z / 200.0f;\n\n            if (X <= fThresh)\n                X = (X - 16.0f / 116.0f) / 7.787f;\n            else\n                X = X * X * X;\n\n            if (Z <= fThresh)\n                Z = (Z - 16.0f / 116.0f) / 7.787f;\n            else\n                Z = Z * Z * Z;\n\n            float B = 0.052891f * X - 0.204043f * Y + 1.151152f * Z;\n            float G = -0.921235f * X + 1.875991f * Y + 0.045244f * Z;\n            float R = 3.079933f * X - 1.537150f * Y - 0.542782f * Z;\n\n            if (srgb)\n            {\n                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);\n                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);\n                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);\n            }\n\n            dst.x = blueIdx == 0 ? B : R;\n            dst.y = G;\n            dst.z = blueIdx == 0 ? 
R : B;\n            setAlpha(dst, ColorChannel<float>::max());\n        }\n\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void Lab2RGBConvert_b(const T& src, D& dst)\n        {\n            float3 srcf, dstf;\n\n            srcf.x = src.x * (100.f / 255.f);\n            srcf.y = src.y - 128;\n            srcf.z = src.z - 128;\n\n            Lab2RGBConvert_f<srgb, blueIdx>(srcf, dstf);\n\n            dst.x = saturate_cast<uchar>(dstf.x * 255.f);\n            dst.y = saturate_cast<uchar>(dstf.y * 255.f);\n            dst.z = saturate_cast<uchar>(dstf.z * 255.f);\n            setAlpha(dst, ColorChannel<uchar>::max());\n        }\n\n        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct Lab2RGB;\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct Lab2RGB<uchar, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const\n            {\n                typename TypeVec<uchar, dcn>::vec_type dst;\n\n                Lab2RGBConvert_b<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ Lab2RGB() {}\n            __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}\n        };\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct Lab2RGB<float, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const\n            {\n                typename TypeVec<float, dcn>::vec_type dst;\n\n                Lab2RGBConvert_f<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ Lab2RGB() {}\n            __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::Lab2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n///////////////////////////////////// RGB <-> Luv /////////////////////////////////////\n\n    namespace color_detail\n    {\n        __constant__ float c_LabCbrtTab[] = 
{0.137931,0.0114066,0.,1.18859e-07,0.149338,0.011407,3.56578e-07,-5.79396e-07,0.160745,0.0114059,-1.38161e-06,2.16892e-06,0.172151,0.0114097,5.12516e-06,-8.0814e-06,0.183558,0.0113957,-1.9119e-05,3.01567e-05,0.194965,0.0114479,7.13509e-05,-0.000112545,0.206371,0.011253,-0.000266285,-0.000106493,0.217252,0.0104009,-0.000585765,7.32149e-05,0.22714,0.00944906,-0.00036612,1.21917e-05,0.236235,0.0087534,-0.000329545,2.01753e-05,0.244679,0.00815483,-0.000269019,1.24435e-05,0.252577,0.00765412,-0.000231689,1.05618e-05,0.26001,0.00722243,-0.000200003,8.26662e-06,0.267041,0.00684723,-0.000175203,6.76746e-06,0.27372,0.00651712,-0.000154901,5.61192e-06,0.280088,0.00622416,-0.000138065,4.67009e-06,0.286179,0.00596204,-0.000124055,3.99012e-06,0.292021,0.0057259,-0.000112085,3.36032e-06,0.297638,0.00551181,-0.000102004,2.95338e-06,0.30305,0.00531666,-9.31435e-05,2.52875e-06,0.308277,0.00513796,-8.55572e-05,2.22022e-06,0.313331,0.00497351,-7.88966e-05,1.97163e-06,0.318228,0.00482163,-7.29817e-05,1.7248e-06,0.322978,0.00468084,-6.78073e-05,1.55998e-06,0.327593,0.0045499,-6.31274e-05,1.36343e-06,0.332081,0.00442774,-5.90371e-05,1.27136e-06,0.336451,0.00431348,-5.5223e-05,1.09111e-06,0.34071,0.00420631,-5.19496e-05,1.0399e-06,0.344866,0.00410553,-4.88299e-05,9.18347e-07,0.348923,0.00401062,-4.60749e-05,8.29942e-07,0.352889,0.00392096,-4.35851e-05,7.98478e-07,0.356767,0.00383619,-4.11896e-05,6.84917e-07,0.360562,0.00375586,-3.91349e-05,6.63976e-07,0.36428,0.00367959,-3.7143e-05,5.93086e-07,0.367923,0.00360708,-3.53637e-05,5.6976e-07,0.371495,0.00353806,-3.36544e-05,4.95533e-07,0.375,0.00347224,-3.21678e-05,4.87951e-07,0.378441,0.00340937,-3.0704e-05,4.4349e-07,0.38182,0.00334929,-2.93735e-05,4.20297e-07,0.38514,0.0032918,-2.81126e-05,3.7872e-07,0.388404,0.00323671,-2.69764e-05,3.596e-07,0.391614,0.00318384,-2.58976e-05,3.5845e-07,0.394772,0.00313312,-2.48223e-05,2.92765e-07,0.397881,0.00308435,-2.3944e-05,3.18232e-07,0.400942,0.00303742,-2.29893e-05,2.82046e-07,0.403957,0.00299229,-2.21432e-05,2.52315e-07,0.406927,0.00294876,-2.13862e-05,2.58416e-07,0.409855,0.00290676,-2.0611e-05,2.33939e-07,0.412741,0.00286624,-1.99092e-05,2.36342e-07,0.415587,0.00282713,-1.92001e-05,1.916e-07,0.418396,0.00278931,-1.86253e-05,2.1915e-07,0.421167,0.00275271,-1.79679e-05,1.83498e-07,0.423901,0.00271733,-1.74174e-05,1.79343e-07,0.426602,0.00268303,-1.68794e-05,1.72013e-07,0.429268,0.00264979,-1.63633e-05,1.75686e-07,0.431901,0.00261759,-1.58363e-05,1.3852e-07,0.434503,0.00258633,-1.54207e-05,1.64304e-07,0.437074,0.00255598,-1.49278e-05,1.28136e-07,0.439616,0.00252651,-1.45434e-05,1.57618e-07,0.442128,0.0024979,-1.40705e-05,1.0566e-07,0.444612,0.00247007,-1.37535e-05,1.34998e-07,0.447068,0.00244297,-1.33485e-05,1.29207e-07,0.449498,0.00241666,-1.29609e-05,9.32347e-08,0.451902,0.00239102,-1.26812e-05,1.23703e-07,0.45428,0.00236603,-1.23101e-05,9.74072e-08,0.456634,0.0023417,-1.20179e-05,1.12518e-07,0.458964,0.002318,-1.16803e-05,7.83681e-08,0.46127,0.00229488,-1.14452e-05,1.10452e-07,0.463554,0.00227232,-1.11139e-05,7.58719e-08,0.465815,0.00225032,-1.08863e-05,9.2699e-08,0.468055,0.00222882,-1.06082e-05,8.97738e-08,0.470273,0.00220788,-1.03388e-05,5.4845e-08,0.47247,0.00218736,-1.01743e-05,1.0808e-07,0.474648,0.00216734,-9.85007e-06,4.9277e-08,0.476805,0.00214779,-9.70224e-06,8.22408e-08,0.478943,0.00212863,-9.45551e-06,6.87942e-08,0.481063,0.00210993,-9.24913e-06,5.98144e-08,0.483163,0.00209161,-9.06969e-06,7.93789e-08,0.485246,0.00207371,-8.83155e-06,3.99032e-08,0.487311,0.00205616,-8.71184e-06,8.88325e-08,0.489358,0.002039,-8
.44534e-06,2.20004e-08,0.491389,0.00202218,-8.37934e-06,9.13872e-08,0.493403,0.0020057,-8.10518e-06,2.96829e-08,0.495401,0.00198957,-8.01613e-06,5.81028e-08,0.497382,0.00197372,-7.84183e-06,6.5731e-08,0.499348,0.00195823,-7.64463e-06,3.66019e-08,0.501299,0.00194305,-7.53483e-06,2.62811e-08,0.503234,0.00192806,-7.45598e-06,9.66907e-08,0.505155,0.00191344,-7.16591e-06,4.18928e-09,0.507061,0.00189912,-7.15334e-06,6.53665e-08,0.508953,0.00188501,-6.95724e-06,3.23686e-08,0.510831,0.00187119,-6.86014e-06,4.35774e-08,0.512696,0.0018576,-6.72941e-06,3.17406e-08,0.514547,0.00184424,-6.63418e-06,6.78785e-08,0.516384,0.00183117,-6.43055e-06,-5.23126e-09,0.518209,0.0018183,-6.44624e-06,7.22562e-08,0.520021,0.00180562,-6.22947e-06,1.42292e-08,0.52182,0.0017932,-6.18679e-06,4.9641e-08,0.523607,0.00178098,-6.03786e-06,2.56259e-08,0.525382,0.00176898,-5.96099e-06,2.66696e-08,0.527145,0.00175714,-5.88098e-06,4.65094e-08,0.528897,0.00174552,-5.74145e-06,2.57114e-08,0.530637,0.00173411,-5.66431e-06,2.94588e-08,0.532365,0.00172287,-5.57594e-06,3.52667e-08,0.534082,0.00171182,-5.47014e-06,8.28868e-09,0.535789,0.00170091,-5.44527e-06,5.07871e-08,0.537484,0.00169017,-5.29291e-06,2.69817e-08,0.539169,0.00167967,-5.21197e-06,2.01009e-08,0.540844,0.0016693,-5.15166e-06,1.18237e-08,0.542508,0.00165903,-5.11619e-06,5.18135e-08,0.544162,0.00164896,-4.96075e-06,1.9341e-08,0.545806,0.00163909,-4.90273e-06,-9.96867e-09,0.54744,0.00162926,-4.93263e-06,8.01382e-08,0.549064,0.00161963,-4.69222e-06,-1.25601e-08,0.550679,0.00161021,-4.7299e-06,2.97067e-08,0.552285,0.00160084,-4.64078e-06,1.29426e-08,0.553881,0.0015916,-4.60195e-06,3.77327e-08,0.555468,0.00158251,-4.48875e-06,1.49412e-08,0.557046,0.00157357,-4.44393e-06,2.17118e-08,0.558615,0.00156475,-4.3788e-06,1.74206e-08,0.560176,0.00155605,-4.32653e-06,2.78152e-08,0.561727,0.00154748,-4.24309e-06,-9.47239e-09,0.563271,0.00153896,-4.27151e-06,6.9679e-08,0.564805,0.00153063,-4.06247e-06,-3.08246e-08,0.566332,0.00152241,-4.15494e-06,5.36188e-08,0.56785,0.00151426,-3.99409e-06,-4.83594e-09,0.56936,0.00150626,-4.00859e-06,2.53293e-08,0.570863,0.00149832,-3.93261e-06,2.27286e-08,0.572357,0.00149052,-3.86442e-06,2.96541e-09,0.573844,0.0014828,-3.85552e-06,2.50147e-08,0.575323,0.00147516,-3.78048e-06,1.61842e-08,0.576794,0.00146765,-3.73193e-06,2.94582e-08,0.578258,0.00146028,-3.64355e-06,-1.48076e-08,0.579715,0.00145295,-3.68798e-06,2.97724e-08,0.581164,0.00144566,-3.59866e-06,1.49272e-08,0.582606,0.00143851,-3.55388e-06,2.97285e-08,0.584041,0.00143149,-3.46469e-06,-1.46323e-08,0.585469,0.00142451,-3.50859e-06,2.88004e-08,0.58689,0.00141758,-3.42219e-06,1.864e-08,0.588304,0.00141079,-3.36627e-06,1.58482e-08,0.589712,0.00140411,-3.31872e-06,-2.24279e-08,0.591112,0.00139741,-3.38601e-06,7.38639e-08,0.592507,0.00139085,-3.16441e-06,-3.46088e-08,0.593894,0.00138442,-3.26824e-06,4.96675e-09,0.595275,0.0013779,-3.25334e-06,7.4346e-08,0.59665,0.00137162,-3.0303e-06,-6.39319e-08,0.598019,0.00136536,-3.2221e-06,6.21725e-08,0.599381,0.00135911,-3.03558e-06,-5.94423e-09,0.600737,0.00135302,-3.05341e-06,2.12091e-08,0.602087,0.00134697,-2.98979e-06,-1.92876e-08,0.603431,0.00134094,-3.04765e-06,5.5941e-08,0.604769,0.00133501,-2.87983e-06,-2.56622e-08,0.606101,0.00132917,-2.95681e-06,4.67078e-08,0.607427,0.0013234,-2.81669e-06,-4.19592e-08,0.608748,0.00131764,-2.94257e-06,6.15243e-08,0.610062,0.00131194,-2.75799e-06,-2.53244e-08,0.611372,0.00130635,-2.83397e-06,3.97739e-08,0.612675,0.0013008,-2.71465e-06,-1.45618e-08,0.613973,0.00129533,-2.75833e-06,1.84733e-08,0.615266,0.00128986,-2.70291e-06,
2.73606e-10,0.616553,0.00128446,-2.70209e-06,4.00367e-08,0.617835,0.00127918,-2.58198e-06,-4.12113e-08,0.619111,0.00127389,-2.70561e-06,6.52039e-08,0.620383,0.00126867,-2.51e-06,-4.07901e-08,0.621649,0.00126353,-2.63237e-06,3.83516e-08,0.62291,0.00125838,-2.51732e-06,6.59315e-09,0.624166,0.00125337,-2.49754e-06,-5.11939e-09,0.625416,0.00124836,-2.5129e-06,1.38846e-08,0.626662,0.00124337,-2.47124e-06,9.18514e-09,0.627903,0.00123846,-2.44369e-06,8.97952e-09,0.629139,0.0012336,-2.41675e-06,1.45012e-08,0.63037,0.00122881,-2.37325e-06,-7.37949e-09,0.631597,0.00122404,-2.39538e-06,1.50169e-08,0.632818,0.00121929,-2.35033e-06,6.91648e-09,0.634035,0.00121461,-2.32958e-06,1.69219e-08,0.635248,0.00121,-2.27882e-06,-1.49997e-08,0.636455,0.0012054,-2.32382e-06,4.30769e-08,0.637659,0.00120088,-2.19459e-06,-3.80986e-08,0.638857,0.00119638,-2.30888e-06,4.97134e-08,0.640051,0.00119191,-2.15974e-06,-4.15463e-08,0.641241,0.00118747,-2.28438e-06,5.68667e-08,0.642426,0.00118307,-2.11378e-06,-7.10641e-09,0.643607,0.00117882,-2.1351e-06,-2.8441e-08,0.644784,0.00117446,-2.22042e-06,6.12658e-08,0.645956,0.00117021,-2.03663e-06,-3.78083e-08,0.647124,0.00116602,-2.15005e-06,3.03627e-08,0.648288,0.00116181,-2.05896e-06,-2.40379e-08,0.649448,0.00115762,-2.13108e-06,6.57887e-08,0.650603,0.00115356,-1.93371e-06,-6.03028e-08,0.651755,0.00114951,-2.11462e-06,5.62134e-08,0.652902,0.00114545,-1.94598e-06,-4.53417e-08,0.654046,0.00114142,-2.082e-06,6.55489e-08,0.655185,0.00113745,-1.88536e-06,-3.80396e-08,0.656321,0.00113357,-1.99948e-06,2.70049e-08,0.657452,0.00112965,-1.91846e-06,-1.03755e-08,0.65858,0.00112578,-1.94959e-06,1.44973e-08,0.659704,0.00112192,-1.9061e-06,1.1991e-08,0.660824,0.00111815,-1.87012e-06,-2.85634e-09,0.66194,0.0011144,-1.87869e-06,-5.65782e-10,0.663053,0.00111064,-1.88039e-06,5.11947e-09,0.664162,0.0011069,-1.86503e-06,3.96924e-08,0.665267,0.00110328,-1.74595e-06,-4.46795e-08,0.666368,0.00109966,-1.87999e-06,1.98161e-08,0.667466,0.00109596,-1.82054e-06,2.502e-08,0.66856,0.00109239,-1.74548e-06,-6.86593e-10,0.669651,0.0010889,-1.74754e-06,-2.22739e-08,0.670738,0.00108534,-1.81437e-06,3.01776e-08,0.671821,0.0010818,-1.72383e-06,2.07732e-08,0.672902,0.00107841,-1.66151e-06,-5.36658e-08,0.673978,0.00107493,-1.82251e-06,7.46802e-08,0.675051,0.00107151,-1.59847e-06,-6.62411e-08,0.676121,0.00106811,-1.79719e-06,7.10748e-08,0.677188,0.00106473,-1.58397e-06,-3.92441e-08,0.678251,0.00106145,-1.7017e-06,2.62973e-08,0.679311,0.00105812,-1.62281e-06,-6.34035e-09,0.680367,0.00105486,-1.64183e-06,-9.36249e-10,0.68142,0.00105157,-1.64464e-06,1.00854e-08,0.68247,0.00104831,-1.61438e-06,2.01995e-08,0.683517,0.00104514,-1.55378e-06,-3.1279e-08,0.68456,0.00104194,-1.64762e-06,4.53114e-08,0.685601,0.00103878,-1.51169e-06,-3.07573e-08,0.686638,0.00103567,-1.60396e-06,1.81133e-08,0.687672,0.00103251,-1.54962e-06,1.79085e-08,0.688703,0.00102947,-1.49589e-06,-3.01428e-08,0.689731,0.00102639,-1.58632e-06,4.30583e-08,0.690756,0.00102334,-1.45715e-06,-2.28814e-08,0.691778,0.00102036,-1.52579e-06,-1.11373e-08,0.692797,0.00101727,-1.5592e-06,6.74305e-08,0.693812,0.00101436,-1.35691e-06,-7.97709e-08,0.694825,0.0010114,-1.59622e-06,7.28391e-08,0.695835,0.00100843,-1.37771e-06,-3.27715e-08,0.696842,0.00100558,-1.47602e-06,-1.35807e-09,0.697846,0.00100262,-1.48009e-06,3.82037e-08,0.698847,0.000999775,-1.36548e-06,-3.22474e-08,0.699846,0.000996948,-1.46223e-06,3.11809e-08,0.700841,0.000994117,-1.36868e-06,-3.28714e-08,0.701834,0.000991281,-1.4673e-06,4.07001e-08,0.702824,0.000988468,-1.3452e-06,-1.07197e-08,0.703811,0.000985746,-1.377
36e-06,2.17866e-09,0.704795,0.000982998,-1.37082e-06,2.00521e-09,0.705777,0.000980262,-1.3648e-06,-1.01996e-08,0.706756,0.000977502,-1.3954e-06,3.87931e-08,0.707732,0.000974827,-1.27902e-06,-2.57632e-08,0.708706,0.000972192,-1.35631e-06,4.65513e-09,0.709676,0.000969493,-1.34235e-06,7.14257e-09,0.710645,0.00096683,-1.32092e-06,2.63791e-08,0.71161,0.000964267,-1.24178e-06,-5.30543e-08,0.712573,0.000961625,-1.40095e-06,6.66289e-08,0.713533,0.000959023,-1.20106e-06,-3.46474e-08,0.714491,0.000956517,-1.305e-06,1.23559e-08,0.715446,0.000953944,-1.26793e-06,-1.47763e-08,0.716399,0.000951364,-1.31226e-06,4.67494e-08,0.717349,0.000948879,-1.17201e-06,-5.3012e-08,0.718297,0.000946376,-1.33105e-06,4.60894e-08,0.719242,0.000943852,-1.19278e-06,-1.21366e-08,0.720185,0.00094143,-1.22919e-06,2.45673e-09,0.721125,0.000938979,-1.22182e-06,2.30966e-09,0.722063,0.000936543,-1.21489e-06,-1.16954e-08,0.722998,0.000934078,-1.24998e-06,4.44718e-08,0.723931,0.000931711,-1.11656e-06,-4.69823e-08,0.724861,0.000929337,-1.25751e-06,2.4248e-08,0.725789,0.000926895,-1.18477e-06,9.5949e-09,0.726715,0.000924554,-1.15598e-06,-3.02286e-09,0.727638,0.000922233,-1.16505e-06,2.49649e-09,0.72856,0.00091991,-1.15756e-06,-6.96321e-09,0.729478,0.000917575,-1.17845e-06,2.53564e-08,0.730395,0.000915294,-1.10238e-06,-3.48578e-08,0.731309,0.000912984,-1.20695e-06,5.44704e-08,0.732221,0.000910734,-1.04354e-06,-6.38144e-08,0.73313,0.000908455,-1.23499e-06,8.15781e-08,0.734038,0.00090623,-9.90253e-07,-8.3684e-08,0.734943,0.000903999,-1.2413e-06,7.43441e-08,0.735846,0.000901739,-1.01827e-06,-3.48787e-08,0.736746,0.000899598,-1.12291e-06,5.56596e-09,0.737645,0.000897369,-1.10621e-06,1.26148e-08,0.738541,0.000895194,-1.06837e-06,3.57935e-09,0.739435,0.000893068,-1.05763e-06,-2.69322e-08,0.740327,0.000890872,-1.13842e-06,4.45448e-08,0.741217,0.000888729,-1.00479e-06,-3.20376e-08,0.742105,0.000886623,-1.1009e-06,2.40011e-08,0.74299,0.000884493,-1.0289e-06,-4.36209e-09,0.743874,0.000882422,-1.04199e-06,-6.55268e-09,0.744755,0.000880319,-1.06164e-06,3.05728e-08,0.745634,0.000878287,-9.69926e-07,-5.61338e-08,0.746512,0.000876179,-1.13833e-06,7.4753e-08,0.747387,0.000874127,-9.14068e-07,-6.40644e-08,0.74826,0.000872106,-1.10626e-06,6.22955e-08,0.749131,0.000870081,-9.19375e-07,-6.59083e-08,0.75,0.000868044,-1.1171e-06,8.21284e-08,0.750867,0.000866056,-8.70714e-07,-8.37915e-08,0.751732,0.000864064,-1.12209e-06,7.42237e-08,0.752595,0.000862042,-8.99418e-07,-3.42894e-08,0.753456,0.00086014,-1.00229e-06,3.32955e-09,0.754315,0.000858146,-9.92297e-07,2.09712e-08,0.755173,0.000856224,-9.29384e-07,-2.76096e-08,0.756028,0.000854282,-1.01221e-06,2.98627e-08,0.756881,0.000852348,-9.22625e-07,-3.22365e-08,0.757733,0.000850406,-1.01933e-06,3.94786e-08,0.758582,0.000848485,-9.00898e-07,-6.46833e-09,0.75943,0.000846664,-9.20303e-07,-1.36052e-08,0.760275,0.000844783,-9.61119e-07,1.28447e-09,0.761119,0.000842864,-9.57266e-07,8.4674e-09,0.761961,0.000840975,-9.31864e-07,2.44506e-08,0.762801,0.000839185,-8.58512e-07,-4.6665e-08,0.763639,0.000837328,-9.98507e-07,4.30001e-08,0.764476,0.00083546,-8.69507e-07,-6.12609e-09,0.76531,0.000833703,-8.87885e-07,-1.84959e-08,0.766143,0.000831871,-9.43372e-07,2.05052e-08,0.766974,0.000830046,-8.81857e-07,-3.92026e-09,0.767803,0.000828271,-8.93618e-07,-4.82426e-09,0.768631,0.000826469,-9.0809e-07,2.32172e-08,0.769456,0.000824722,-8.38439e-07,-2.84401e-08,0.77028,0.00082296,-9.23759e-07,3.09386e-08,0.771102,0.000821205,-8.30943e-07,-3.57099e-08,0.771922,0.000819436,-9.38073e-07,5.22963e-08,0.772741,0.000817717,-7.81184e-07,-5.426
58e-08,0.773558,0.000815992,-9.43981e-07,4.55579e-08,0.774373,0.000814241,-8.07308e-07,-8.75656e-09,0.775186,0.0008126,-8.33578e-07,-1.05315e-08,0.775998,0.000810901,-8.65172e-07,-8.72188e-09,0.776808,0.000809145,-8.91338e-07,4.54191e-08,0.777616,0.000807498,-7.5508e-07,-5.37454e-08,0.778423,0.000805827,-9.16317e-07,5.03532e-08,0.779228,0.000804145,-7.65257e-07,-2.84584e-08,0.780031,0.000802529,-8.50632e-07,3.87579e-09,0.780833,0.00080084,-8.39005e-07,1.29552e-08,0.781633,0.0007992,-8.00139e-07,3.90804e-09,0.782432,0.000797612,-7.88415e-07,-2.85874e-08,0.783228,0.000795949,-8.74177e-07,5.0837e-08,0.784023,0.000794353,-7.21666e-07,-5.55513e-08,0.784817,0.000792743,-8.8832e-07,5.21587e-08,0.785609,0.000791123,-7.31844e-07,-3.38744e-08,0.786399,0.000789558,-8.33467e-07,2.37342e-08,0.787188,0.000787962,-7.62264e-07,-1.45775e-09,0.787975,0.000786433,-7.66638e-07,-1.79034e-08,0.788761,0.000784846,-8.20348e-07,1.34665e-08,0.789545,0.000783246,-7.79948e-07,2.3642e-08,0.790327,0.000781757,-7.09022e-07,-4.84297e-08,0.791108,0.000780194,-8.54311e-07,5.08674e-08,0.791888,0.000778638,-7.01709e-07,-3.58303e-08,0.792666,0.000777127,-8.092e-07,3.28493e-08,0.793442,0.000775607,-7.10652e-07,-3.59624e-08,0.794217,0.000774078,-8.1854e-07,5.13959e-08,0.79499,0.000772595,-6.64352e-07,-5.04121e-08,0.795762,0.000771115,-8.15588e-07,3.10431e-08,0.796532,0.000769577,-7.22459e-07,-1.41557e-08,0.797301,0.00076809,-7.64926e-07,2.55795e-08,0.798069,0.000766636,-6.88187e-07,-2.85578e-08,0.798835,0.000765174,-7.73861e-07,2.90472e-08,0.799599,0.000763714,-6.86719e-07,-2.80262e-08,0.800362,0.000762256,-7.70798e-07,2.34531e-08,0.801123,0.000760785,-7.00438e-07,-6.18144e-09,0.801884,0.000759366,-7.18983e-07,1.27263e-09,0.802642,0.000757931,-7.15165e-07,1.09101e-09,0.803399,0.000756504,-7.11892e-07,-5.63675e-09,0.804155,0.000755064,-7.28802e-07,2.14559e-08,0.80491,0.00075367,-6.64434e-07,-2.05821e-08,0.805663,0.00075228,-7.26181e-07,1.26812e-09,0.806414,0.000750831,-7.22377e-07,1.55097e-08,0.807164,0.000749433,-6.75848e-07,-3.70216e-09,0.807913,0.00074807,-6.86954e-07,-7.0105e-10,0.80866,0.000746694,-6.89057e-07,6.5063e-09,0.809406,0.000745336,-6.69538e-07,-2.53242e-08,0.810151,0.000743921,-7.45511e-07,3.51858e-08,0.810894,0.000742535,-6.39953e-07,3.79034e-09,0.811636,0.000741267,-6.28582e-07,-5.03471e-08,0.812377,0.000739858,-7.79624e-07,7.83886e-08,0.813116,0.000738534,-5.44458e-07,-8.43935e-08,0.813854,0.000737192,-7.97638e-07,8.03714e-08,0.81459,0.000735838,-5.56524e-07,-5.82784e-08,0.815325,0.00073455,-7.31359e-07,3.35329e-08,0.816059,0.000733188,-6.3076e-07,-1.62486e-08,0.816792,0.000731878,-6.79506e-07,3.14614e-08,0.817523,0.000730613,-5.85122e-07,-4.99925e-08,0.818253,0.000729293,-7.35099e-07,4.92994e-08,0.818982,0.000727971,-5.87201e-07,-2.79959e-08,0.819709,0.000726712,-6.71189e-07,3.07959e-09,0.820435,0.000725379,-6.6195e-07,1.56777e-08,0.82116,0.000724102,-6.14917e-07,-6.18564e-09,0.821883,0.000722854,-6.33474e-07,9.06488e-09,0.822606,0.000721614,-6.06279e-07,-3.00739e-08,0.823327,0.000720311,-6.96501e-07,5.16262e-08,0.824046,0.000719073,-5.41623e-07,-5.72214e-08,0.824765,0.000717818,-7.13287e-07,5.80503e-08,0.825482,0.000716566,-5.39136e-07,-5.57703e-08,0.826198,0.00071532,-7.06447e-07,4.58215e-08,0.826912,0.000714045,-5.68983e-07,-8.30636e-09,0.827626,0.000712882,-5.93902e-07,-1.25961e-08,0.828338,0.000711656,-6.3169e-07,-9.13985e-10,0.829049,0.00071039,-6.34432e-07,1.62519e-08,0.829759,0.00070917,-5.85676e-07,-4.48904e-09,0.830468,0.000707985,-5.99143e-07,1.70418e-09,0.831175,0.000706792,-5.9403e-07,-2.32768e-09
,0.831881,0.000705597,-6.01014e-07,7.60648e-09,0.832586,0.000704418,-5.78194e-07,-2.80982e-08,0.83329,0.000703177,-6.62489e-07,4.51817e-08,0.833993,0.000701988,-5.26944e-07,-3.34192e-08,0.834694,0.000700834,-6.27201e-07,2.88904e-08,0.835394,0.000699666,-5.4053e-07,-2.25378e-08,0.836093,0.000698517,-6.08143e-07,1.65589e-09,0.836791,0.000697306,-6.03176e-07,1.59142e-08,0.837488,0.000696147,-5.55433e-07,-5.70801e-09,0.838184,0.000695019,-5.72557e-07,6.91792e-09,0.838878,0.000693895,-5.51803e-07,-2.19637e-08,0.839571,0.000692725,-6.17694e-07,2.13321e-08,0.840263,0.000691554,-5.53698e-07,-3.75996e-09,0.840954,0.000690435,-5.64978e-07,-6.29219e-09,0.841644,0.000689287,-5.83855e-07,2.89287e-08,0.842333,0.000688206,-4.97068e-07,-4.98181e-08,0.843021,0.000687062,-6.46523e-07,5.11344e-08,0.843707,0.000685922,-4.9312e-07,-3.55102e-08,0.844393,0.00068483,-5.9965e-07,3.13019e-08,0.845077,0.000683724,-5.05745e-07,-3.00925e-08,0.84576,0.000682622,-5.96022e-07,2.94636e-08,0.846442,0.000681519,-5.07631e-07,-2.81572e-08,0.847123,0.000680419,-5.92103e-07,2.35606e-08,0.847803,0.000679306,-5.21421e-07,-6.48045e-09,0.848482,0.000678243,-5.40863e-07,2.36124e-09,0.849159,0.000677169,-5.33779e-07,-2.96461e-09,0.849836,0.000676092,-5.42673e-07,9.49728e-09,0.850512,0.000675035,-5.14181e-07,-3.50245e-08,0.851186,0.000673902,-6.19254e-07,7.09959e-08,0.851859,0.000672876,-4.06267e-07,-7.01453e-08,0.852532,0.000671853,-6.16703e-07,3.07714e-08,0.853203,0.000670712,-5.24388e-07,6.66423e-09,0.853873,0.000669684,-5.04396e-07,2.17629e-09,0.854542,0.000668681,-4.97867e-07,-1.53693e-08,0.855211,0.000667639,-5.43975e-07,-3.03752e-10,0.855878,0.000666551,-5.44886e-07,1.65844e-08,0.856544,0.000665511,-4.95133e-07,-6.42907e-09,0.857209,0.000664501,-5.1442e-07,9.13195e-09,0.857873,0.0006635,-4.87024e-07,-3.00987e-08,0.858536,0.000662435,-5.7732e-07,5.16584e-08,0.859198,0.000661436,-4.22345e-07,-5.73255e-08,0.859859,0.000660419,-5.94322e-07,5.84343e-08,0.860518,0.000659406,-4.19019e-07,-5.72022e-08,0.861177,0.000658396,-5.90626e-07,5.11653e-08,0.861835,0.000657368,-4.3713e-07,-2.82495e-08,0.862492,0.000656409,-5.21878e-07,2.22788e-09,0.863148,0.000655372,-5.15195e-07,1.9338e-08,0.863803,0.0006544,-4.5718e-07,-1.99754e-08,0.864457,0.000653425,-5.17107e-07,9.59024e-10,0.86511,0.000652394,-5.1423e-07,1.61393e-08,0.865762,0.000651414,-4.65812e-07,-5.91149e-09,0.866413,0.000650465,-4.83546e-07,7.50665e-09,0.867063,0.00064952,-4.61026e-07,-2.4115e-08,0.867712,0.000648526,-5.33371e-07,2.93486e-08,0.86836,0.000647547,-4.45325e-07,-3.36748e-08,0.869007,0.000646555,-5.4635e-07,4.57461e-08,0.869653,0.0006456,-4.09112e-07,-3.01002e-08,0.870298,0.000644691,-4.99412e-07,1.50501e-08,0.870942,0.000643738,-4.54262e-07,-3.01002e-08,0.871585,0.000642739,-5.44563e-07,4.57461e-08,0.872228,0.000641787,-4.07324e-07,-3.36748e-08,0.872869,0.000640871,-5.08349e-07,2.93486e-08,0.873509,0.000639943,-4.20303e-07,-2.4115e-08,0.874149,0.00063903,-4.92648e-07,7.50655e-09,0.874787,0.000638067,-4.70128e-07,-5.91126e-09,0.875425,0.000637109,-4.87862e-07,1.61385e-08,0.876062,0.000636182,-4.39447e-07,9.61961e-10,0.876697,0.000635306,-4.36561e-07,-1.99863e-08,0.877332,0.000634373,-4.9652e-07,1.93785e-08,0.877966,0.000633438,-4.38384e-07,2.07697e-09,0.878599,0.000632567,-4.32153e-07,-2.76864e-08,0.879231,0.00063162,-5.15212e-07,4.90641e-08,0.879862,0.000630737,-3.6802e-07,-4.93606e-08,0.880493,0.000629852,-5.16102e-07,2.9169e-08,0.881122,0.000628908,-4.28595e-07,-7.71083e-09,0.881751,0.000628027,-4.51727e-07,1.6744e-09,0.882378,0.000627129,-4.46704e-07,1.01317e-09,0.88300
5,0.000626239,-4.43665e-07,-5.72703e-09,0.883631,0.000625334,-4.60846e-07,2.1895e-08,0.884255,0.000624478,-3.95161e-07,-2.22481e-08,0.88488,0.000623621,-4.61905e-07,7.4928e-09,0.885503,0.00062272,-4.39427e-07,-7.72306e-09,0.886125,0.000621818,-4.62596e-07,2.33995e-08,0.886746,0.000620963,-3.92398e-07,-2.62704e-08,0.887367,0.000620099,-4.71209e-07,2.20775e-08,0.887987,0.000619223,-4.04976e-07,-2.43496e-09,0.888605,0.000618406,-4.12281e-07,-1.23377e-08,0.889223,0.000617544,-4.49294e-07,-7.81876e-09,0.88984,0.000616622,-4.72751e-07,4.36128e-08,0.890457,0.000615807,-3.41912e-07,-4.7423e-08,0.891072,0.000614981,-4.84181e-07,2.68698e-08,0.891687,0.000614093,-4.03572e-07,-4.51384e-10,0.8923,0.000613285,-4.04926e-07,-2.50643e-08,0.892913,0.0006124,-4.80119e-07,4.11038e-08,0.893525,0.000611563,-3.56808e-07,-2.01414e-08,0.894136,0.000610789,-4.17232e-07,-2.01426e-08,0.894747,0.000609894,-4.7766e-07,4.11073e-08,0.895356,0.000609062,-3.54338e-07,-2.50773e-08,0.895965,0.000608278,-4.2957e-07,-4.02954e-10,0.896573,0.000607418,-4.30779e-07,2.66891e-08,0.89718,0.000606636,-3.50711e-07,-4.67489e-08,0.897786,0.000605795,-4.90958e-07,4.10972e-08,0.898391,0.000604936,-3.67666e-07,1.56948e-09,0.898996,0.000604205,-3.62958e-07,-4.73751e-08,0.8996,0.000603337,-5.05083e-07,6.87214e-08,0.900202,0.000602533,-2.98919e-07,-4.86966e-08,0.900805,0.000601789,-4.45009e-07,6.85589e-09,0.901406,0.00060092,-4.24441e-07,2.1273e-08,0.902007,0.000600135,-3.60622e-07,-3.23434e-08,0.902606,0.000599317,-4.57652e-07,4.84959e-08,0.903205,0.000598547,-3.12164e-07,-4.24309e-08,0.903803,0.000597795,-4.39457e-07,2.01844e-09,0.904401,0.000596922,-4.33402e-07,3.43571e-08,0.904997,0.000596159,-3.30331e-07,-2.02374e-08,0.905593,0.000595437,-3.91043e-07,-1.30123e-08,0.906188,0.000594616,-4.3008e-07,1.26819e-08,0.906782,0.000593794,-3.92034e-07,2.18894e-08,0.907376,0.000593076,-3.26366e-07,-4.06349e-08,0.907968,0.000592301,-4.4827e-07,2.1441e-08,0.90856,0.000591469,-3.83947e-07,1.44754e-08,0.909151,0.000590744,-3.40521e-07,-1.97379e-08,0.909742,0.000590004,-3.99735e-07,4.87161e-09,0.910331,0.000589219,-3.8512e-07,2.51532e-10,0.91092,0.00058845,-3.84366e-07,-5.87776e-09,0.911508,0.000587663,-4.01999e-07,2.32595e-08,0.912096,0.000586929,-3.3222e-07,-2.75554e-08,0.912682,0.000586182,-4.14887e-07,2.73573e-08,0.913268,0.000585434,-3.32815e-07,-2.22692e-08,0.913853,0.000584702,-3.99622e-07,2.11486e-09,0.914437,0.000583909,-3.93278e-07,1.38098e-08,0.915021,0.000583164,-3.51848e-07,2.25042e-09,0.915604,0.000582467,-3.45097e-07,-2.28115e-08,0.916186,0.000581708,-4.13531e-07,2.93911e-08,0.916767,0.000580969,-3.25358e-07,-3.51481e-08,0.917348,0.000580213,-4.30803e-07,5.15967e-08,0.917928,0.000579506,-2.76012e-07,-5.20296e-08,0.918507,0.000578798,-4.32101e-07,3.73124e-08,0.919085,0.000578046,-3.20164e-07,-3.76154e-08,0.919663,0.000577293,-4.3301e-07,5.35447e-08,0.92024,0.000576587,-2.72376e-07,-5.7354e-08,0.920816,0.000575871,-4.44438e-07,5.66621e-08,0.921391,0.000575152,-2.74452e-07,-5.00851e-08,0.921966,0.000574453,-4.24707e-07,2.4469e-08,0.92254,0.000573677,-3.513e-07,1.18138e-08,0.923114,0.000573009,-3.15859e-07,-1.21195e-08,0.923686,0.000572341,-3.52217e-07,-2.29403e-08,0.924258,0.000571568,-4.21038e-07,4.4276e-08,0.924829,0.000570859,-2.8821e-07,-3.49546e-08,0.9254,0.000570178,-3.93074e-07,3.59377e-08,0.92597,0.000569499,-2.85261e-07,-4.91915e-08,0.926539,0.000568781,-4.32835e-07,4.16189e-08,0.927107,0.00056804,-3.07979e-07,1.92523e-09,0.927675,0.00056743,-3.02203e-07,-4.93198e-08,0.928242,0.000566678,-4.50162e-07,7.61447e-08,0.928809,0.000566006,-
2.21728e-07,-7.6445e-08,0.929374,0.000565333,-4.51063e-07,5.08216e-08,0.929939,0.000564583,-2.98599e-07,-7.63212e-09,0.930503,0.000563963,-3.21495e-07,-2.02931e-08,0.931067,0.000563259,-3.82374e-07,2.92001e-08,0.93163,0.000562582,-2.94774e-07,-3.69025e-08,0.932192,0.000561882,-4.05482e-07,5.88053e-08,0.932754,0.000561247,-2.29066e-07,-7.91094e-08,0.933315,0.000560552,-4.66394e-07,7.88184e-08,0.933875,0.000559856,-2.29939e-07,-5.73501e-08,0.934434,0.000559224,-4.01989e-07,3.13727e-08,0.934993,0.000558514,-3.07871e-07,-8.53611e-09,0.935551,0.000557873,-3.33479e-07,2.77175e-09,0.936109,0.000557214,-3.25164e-07,-2.55091e-09,0.936666,0.000556556,-3.32817e-07,7.43188e-09,0.937222,0.000555913,-3.10521e-07,-2.71766e-08,0.937778,0.00055521,-3.92051e-07,4.167e-08,0.938333,0.000554551,-2.67041e-07,-2.02941e-08,0.938887,0.000553956,-3.27923e-07,-2.00984e-08,0.93944,0.00055324,-3.88218e-07,4.10828e-08,0.939993,0.000552587,-2.6497e-07,-2.50237e-08,0.940546,0.000551982,-3.40041e-07,-5.92583e-10,0.941097,0.0005513,-3.41819e-07,2.7394e-08,0.941648,0.000550698,-2.59637e-07,-4.93788e-08,0.942199,0.000550031,-4.07773e-07,5.09119e-08,0.942748,0.000549368,-2.55038e-07,-3.50595e-08,0.943297,0.000548753,-3.60216e-07,2.97214e-08,0.943846,0.000548122,-2.71052e-07,-2.42215e-08,0.944394,0.000547507,-3.43716e-07,7.55985e-09,0.944941,0.000546842,-3.21037e-07,-6.01796e-09,0.945487,0.000546182,-3.3909e-07,1.65119e-08,0.946033,0.000545553,-2.89555e-07,-4.2498e-10,0.946578,0.000544973,-2.9083e-07,-1.4812e-08,0.947123,0.000544347,-3.35266e-07,6.83068e-11,0.947667,0.000543676,-3.35061e-07,1.45388e-08,0.94821,0.00054305,-2.91444e-07,1.38123e-09,0.948753,0.000542471,-2.87301e-07,-2.00637e-08,0.949295,0.000541836,-3.47492e-07,1.92688e-08,0.949837,0.000541199,-2.89685e-07,2.59298e-09,0.950378,0.000540628,-2.81906e-07,-2.96407e-08,0.950918,0.000539975,-3.70829e-07,5.63652e-08,0.951458,0.000539402,-2.01733e-07,-7.66107e-08,0.951997,0.000538769,-4.31565e-07,7.12638e-08,0.952535,0.00053812,-2.17774e-07,-2.96305e-08,0.953073,0.000537595,-3.06665e-07,-1.23464e-08,0.95361,0.000536945,-3.43704e-07,1.94114e-08,0.954147,0.000536316,-2.8547e-07,-5.69451e-09,0.954683,0.000535728,-3.02554e-07,3.36666e-09,0.955219,0.000535133,-2.92454e-07,-7.77208e-09,0.955753,0.000534525,-3.1577e-07,2.77216e-08,0.956288,0.000533976,-2.32605e-07,-4.35097e-08,0.956821,0.00053338,-3.63134e-07,2.7108e-08,0.957354,0.000532735,-2.8181e-07,-5.31772e-09,0.957887,0.000532156,-2.97764e-07,-5.83718e-09,0.958419,0.000531543,-3.15275e-07,2.86664e-08,0.95895,0.000530998,-2.29276e-07,-4.9224e-08,0.959481,0.000530392,-3.76948e-07,4.90201e-08,0.960011,0.000529785,-2.29887e-07,-2.76471e-08,0.96054,0.000529243,-3.12829e-07,1.96385e-09,0.961069,0.000528623,-3.06937e-07,1.97917e-08,0.961598,0.000528068,-2.47562e-07,-2.15261e-08,0.962125,0.000527508,-3.1214e-07,6.70795e-09,0.962653,0.000526904,-2.92016e-07,-5.30573e-09,0.963179,0.000526304,-3.07934e-07,1.4515e-08,0.963705,0.000525732,-2.64389e-07,6.85048e-09,0.964231,0.000525224,-2.43837e-07,-4.19169e-08,0.964756,0.00052461,-3.69588e-07,4.1608e-08,0.96528,0.000523996,-2.44764e-07,-5.30598e-09,0.965804,0.000523491,-2.60682e-07,-2.03841e-08,0.966327,0.000522908,-3.21834e-07,2.72378e-08,0.966849,0.000522346,-2.40121e-07,-2.89625e-08,0.967371,0.000521779,-3.27008e-07,2.90075e-08,0.967893,0.000521212,-2.39986e-07,-2.74629e-08,0.968414,0.00052065,-3.22374e-07,2.12396e-08,0.968934,0.000520069,-2.58656e-07,2.10922e-09,0.969454,0.000519558,-2.52328e-07,-2.96765e-08,0.969973,0.000518964,-3.41357e-07,5.6992e-08,0.970492,0.000518452,-1.70382e
-07,-7.90821e-08,0.97101,0.000517874,-4.07628e-07,8.05224e-08,0.971528,0.000517301,-1.66061e-07,-6.41937e-08,0.972045,0.000516776,-3.58642e-07,5.70429e-08,0.972561,0.00051623,-1.87513e-07,-4.47686e-08,0.973077,0.00051572,-3.21819e-07,2.82237e-09,0.973593,0.000515085,-3.13352e-07,3.34792e-08,0.974108,0.000514559,-2.12914e-07,-1.75298e-08,0.974622,0.000514081,-2.65503e-07,-2.29648e-08,0.975136,0.000513481,-3.34398e-07,4.97843e-08,0.975649,0.000512961,-1.85045e-07,-5.6963e-08,0.976162,0.00051242,-3.55934e-07,5.88585e-08,0.976674,0.000511885,-1.79359e-07,-5.92616e-08,0.977185,0.000511348,-3.57143e-07,5.89785e-08,0.977696,0.000510811,-1.80208e-07,-5.74433e-08,0.978207,0.000510278,-3.52538e-07,5.15854e-08,0.978717,0.000509728,-1.97781e-07,-2.9689e-08,0.979226,0.000509243,-2.86848e-07,7.56591e-09,0.979735,0.000508692,-2.64151e-07,-5.74649e-10,0.980244,0.000508162,-2.65875e-07,-5.26732e-09,0.980752,0.000507615,-2.81677e-07,2.16439e-08,0.981259,0.000507116,-2.16745e-07,-2.17037e-08,0.981766,0.000506618,-2.81856e-07,5.56636e-09,0.982272,0.000506071,-2.65157e-07,-5.61689e-10,0.982778,0.000505539,-2.66842e-07,-3.31963e-09,0.983283,0.000504995,-2.76801e-07,1.38402e-08,0.983788,0.000504483,-2.3528e-07,7.56339e-09,0.984292,0.000504035,-2.1259e-07,-4.40938e-08,0.984796,0.000503478,-3.44871e-07,4.96026e-08,0.985299,0.000502937,-1.96064e-07,-3.51071e-08,0.985802,0.000502439,-3.01385e-07,3.12212e-08,0.986304,0.00050193,-2.07721e-07,-3.0173e-08,0.986806,0.000501424,-2.9824e-07,2.9866e-08,0.987307,0.000500917,-2.08642e-07,-2.96865e-08,0.987808,0.000500411,-2.97702e-07,2.92753e-08,0.988308,0.000499903,-2.09876e-07,-2.78101e-08,0.988807,0.0004994,-2.93306e-07,2.23604e-08,0.989307,0.000498881,-2.26225e-07,-2.02681e-09,0.989805,0.000498422,-2.32305e-07,-1.42531e-08,0.990303,0.000497915,-2.75065e-07,-5.65232e-10,0.990801,0.000497363,-2.76761e-07,1.65141e-08,0.991298,0.000496859,-2.27218e-07,-5.88639e-09,0.991795,0.000496387,-2.44878e-07,7.0315e-09,0.992291,0.000495918,-2.23783e-07,-2.22396e-08,0.992787,0.000495404,-2.90502e-07,2.23224e-08,0.993282,0.00049489,-2.23535e-07,-7.44543e-09,0.993776,0.000494421,-2.45871e-07,7.45924e-09,0.994271,0.000493951,-2.23493e-07,-2.23915e-08,0.994764,0.000493437,-2.90668e-07,2.25021e-08,0.995257,0.000492923,-2.23161e-07,-8.01218e-09,0.99575,0.000492453,-2.47198e-07,9.54669e-09,0.996242,0.000491987,-2.18558e-07,-3.01746e-08,0.996734,0.000491459,-3.09082e-07,5.1547e-08,0.997225,0.000490996,-1.54441e-07,-5.68039e-08,0.997716,0.000490517,-3.24853e-07,5.64594e-08,0.998206,0.000490036,-1.55474e-07,-4.98245e-08,0.998696,0.000489576,-3.04948e-07,2.36292e-08,0.999186,0.000489037,-2.3406e-07,1.49121e-08,0.999674,0.000488613,-1.89324e-07,-2.3673e-08,1.00016,0.000488164,-2.60343e-07,2.01754e-08,1.00065,0.000487704,-1.99816e-07,-5.70288e-08,1.00114,0.000487133,-3.70903e-07,8.87303e-08,1.00162,0.000486657,-1.04712e-07,-5.94737e-08,1.00211,0.000486269,-2.83133e-07,2.99553e-08,1.0026,0.000485793,-1.93267e-07,-6.03474e-08,1.00308,0.000485225,-3.74309e-07,9.2225e-08,1.00357,0.000484754,-9.76345e-08,-7.0134e-08,1.00405,0.000484348,-3.08036e-07,6.91016e-08,1.00454,0.000483939,-1.00731e-07,-8.70633e-08,1.00502,0.000483476,-3.61921e-07,4.07328e-08,1.0055,0.000482875,-2.39723e-07,4.33413e-08,1.00599,0.000482525,-1.09699e-07,-9.48886e-08,1.00647,0.000482021,-3.94365e-07,9.77947e-08,1.00695,0.000481526,-1.00981e-07,-5.78713e-08,1.00743,0.00048115,-2.74595e-07,1.44814e-08,1.00791,0.000480645,-2.31151e-07,-5.42665e-11,1.00839,0.000480182,-2.31314e-07,-1.42643e-08,1.00887,0.000479677,-2.74106e-07,5.71115e-08,1
.00935,0.0004793,-1.02772e-07,-9.49724e-08,1.00983,0.000478809,-3.87689e-07,8.43596e-08,1.01031,0.000478287,-1.3461e-07,-4.04755e-09,1.01079,0.000478006,-1.46753e-07,-6.81694e-08,1.01127,0.000477508,-3.51261e-07,3.83067e-08,1.01174,0.00047692,-2.36341e-07,3.41521e-08,1.01222,0.00047655,-1.33885e-07,-5.57058e-08,1.0127,0.000476115,-3.01002e-07,6.94616e-08,1.01317,0.000475721,-9.26174e-08,-1.02931e-07,1.01365,0.000475227,-4.01412e-07,1.03846e-07,1.01412,0.000474736,-8.98751e-08,-7.40321e-08,1.0146,0.000474334,-3.11971e-07,7.30735e-08,1.01507,0.00047393,-9.27508e-08,-9.90527e-08,1.01554,0.000473447,-3.89909e-07,8.47188e-08,1.01602,0.000472921,-1.35753e-07,-1.40381e-09,1.01649,0.000472645,-1.39964e-07,-7.91035e-08,1.01696,0.000472128,-3.77275e-07,7.93993e-08,1.01744,0.000471612,-1.39077e-07,-7.52607e-11,1.01791,0.000471334,-1.39302e-07,-7.90983e-08,1.01838,0.000470818,-3.76597e-07,7.80499e-08,1.01885,0.000470299,-1.42448e-07,5.31733e-09,1.01932,0.00047003,-1.26496e-07,-9.93193e-08,1.01979,0.000469479,-4.24453e-07,1.53541e-07,1.02026,0.00046909,3.617e-08,-1.57217e-07,1.02073,0.000468691,-4.35482e-07,1.177e-07,1.02119,0.000468173,-8.23808e-08,-7.51659e-08,1.02166,0.000467783,-3.07878e-07,6.37538e-08,1.02213,0.000467358,-1.16617e-07,-6.064e-08,1.0226,0.000466943,-2.98537e-07,5.9597e-08,1.02306,0.000466525,-1.19746e-07,-5.85386e-08,1.02353,0.00046611,-2.95362e-07,5.53482e-08,1.024,0.000465685,-1.29317e-07,-4.36449e-08,1.02446,0.000465296,-2.60252e-07,2.20268e-11,1.02493,0.000464775,-2.60186e-07,4.35568e-08,1.02539,0.000464386,-1.29516e-07,-5.50398e-08,1.02586,0.000463961,-2.94635e-07,5.73932e-08,1.02632,0.000463544,-1.22456e-07,-5.53236e-08,1.02678,0.000463133,-2.88426e-07,4.46921e-08,1.02725,0.000462691,-1.5435e-07,-4.23534e-09,1.02771,0.000462369,-1.67056e-07,-2.77507e-08,1.02817,0.000461952,-2.50308e-07,-3.97101e-09,1.02863,0.000461439,-2.62221e-07,4.36348e-08,1.02909,0.000461046,-1.31317e-07,-5.13589e-08,1.02955,0.000460629,-2.85394e-07,4.25913e-08,1.03001,0.000460186,-1.5762e-07,2.0285e-10,1.03047,0.000459871,-1.57011e-07,-4.34027e-08,1.03093,0.000459427,-2.87219e-07,5.41987e-08,1.03139,0.000459015,-1.24623e-07,-5.4183e-08,1.03185,0.000458604,-2.87172e-07,4.33239e-08,1.03231,0.000458159,-1.572e-07,9.65817e-11,1.03277,0.000457845,-1.56911e-07,-4.37103e-08,1.03323,0.0004574,-2.88041e-07,5.55351e-08,1.03368,0.000456991,-1.21436e-07,-5.9221e-08,1.03414,0.00045657,-2.99099e-07,6.21394e-08,1.0346,0.000456158,-1.1268e-07,-7.01275e-08,1.03505,0.000455723,-3.23063e-07,9.91614e-08,1.03551,0.000455374,-2.55788e-08,-8.80996e-08,1.03596,0.000455058,-2.89878e-07,1.48184e-08,1.03642,0.000454523,-2.45422e-07,2.88258e-08,1.03687,0.000454119,-1.58945e-07,-1.09125e-08,1.03733,0.000453768,-1.91682e-07,1.48241e-08,1.03778,0.000453429,-1.4721e-07,-4.83838e-08,1.03823,0.00045299,-2.92361e-07,5.95019e-08,1.03869,0.000452584,-1.13856e-07,-7.04146e-08,1.03914,0.000452145,-3.25099e-07,1.02947e-07,1.03959,0.000451803,-1.62583e-08,-1.02955e-07,1.04004,0.000451462,-3.25123e-07,7.04544e-08,1.04049,0.000451023,-1.1376e-07,-5.96534e-08,1.04094,0.000450616,-2.9272e-07,4.89499e-08,1.04139,0.000450178,-1.45871e-07,-1.69369e-08,1.04184,0.000449835,-1.96681e-07,1.87977e-08,1.04229,0.000449498,-1.40288e-07,-5.82539e-08,1.04274,0.000449043,-3.1505e-07,9.50087e-08,1.04319,0.000448698,-3.00238e-08,-8.33623e-08,1.04364,0.000448388,-2.80111e-07,2.20363e-11,1.04409,0.000447828,-2.80045e-07,8.32742e-08,1.04454,0.000447517,-3.02221e-08,-9.47002e-08,1.04498,0.000447173,-3.14323e-07,5.7108e-08,1.04543,0.000446716,-1.42999e-07,-1.45225e-08,1.
04588,0.000446386,-1.86566e-07,9.82022e-10,1.04632,0.000446016,-1.8362e-07,1.05944e-08,1.04677,0.00044568,-1.51837e-07,-4.33597e-08,1.04721,0.000445247,-2.81916e-07,4.36352e-08,1.04766,0.000444814,-1.51011e-07,-1.19717e-08,1.0481,0.000444476,-1.86926e-07,4.25158e-09,1.04855,0.000444115,-1.74171e-07,-5.03461e-09,1.04899,0.000443751,-1.89275e-07,1.58868e-08,1.04944,0.00044342,-1.41614e-07,-5.85127e-08,1.04988,0.000442961,-3.17152e-07,9.89548e-08,1.05032,0.000442624,-2.0288e-08,-9.88878e-08,1.05076,0.000442287,-3.16951e-07,5.81779e-08,1.05121,0.000441827,-1.42418e-07,-1.46144e-08,1.05165,0.000441499,-1.86261e-07,2.79892e-10,1.05209,0.000441127,-1.85421e-07,1.34949e-08,1.05253,0.000440797,-1.44937e-07,-5.42594e-08,1.05297,0.000440344,-3.07715e-07,8.43335e-08,1.05341,0.000439982,-5.47146e-08,-4.46558e-08,1.05385,0.000439738,-1.88682e-07,-2.49193e-08,1.05429,0.000439286,-2.6344e-07,2.5124e-08,1.05473,0.000438835,-1.88068e-07,4.36328e-08,1.05517,0.000438589,-5.71699e-08,-8.04459e-08,1.05561,0.000438234,-2.98508e-07,3.97324e-08,1.05605,0.000437756,-1.79311e-07,4.07258e-08,1.05648,0.000437519,-5.71332e-08,-8.34263e-08,1.05692,0.000437155,-3.07412e-07,5.45608e-08,1.05736,0.000436704,-1.4373e-07,-1.56078e-08,1.05779,0.000436369,-1.90553e-07,7.87043e-09,1.05823,0.000436012,-1.66942e-07,-1.58739e-08,1.05867,0.00043563,-2.14563e-07,5.56251e-08,1.0591,0.000435368,-4.76881e-08,-8.74172e-08,1.05954,0.000435011,-3.0994e-07,5.56251e-08,1.05997,0.000434558,-1.43064e-07,-1.58739e-08,1.06041,0.000434224,-1.90686e-07,7.87042e-09,1.06084,0.000433866,-1.67075e-07,-1.56078e-08,1.06127,0.000433485,-2.13898e-07,5.45609e-08,1.06171,0.000433221,-5.02157e-08,-8.34263e-08,1.06214,0.00043287,-3.00495e-07,4.07258e-08,1.06257,0.000432391,-1.78317e-07,3.97325e-08,1.063,0.000432154,-5.91198e-08,-8.04464e-08,1.06344,0.000431794,-3.00459e-07,4.36347e-08,1.06387,0.000431324,-1.69555e-07,2.5117e-08,1.0643,0.000431061,-9.42041e-08,-2.48934e-08,1.06473,0.000430798,-1.68884e-07,-4.47527e-08,1.06516,0.000430326,-3.03142e-07,8.46951e-08,1.06559,0.000429973,-4.90573e-08,-5.56089e-08,1.06602,0.000429708,-2.15884e-07,1.85314e-08,1.06645,0.000429332,-1.6029e-07,-1.85166e-08,1.06688,0.000428956,-2.1584e-07,5.5535e-08,1.06731,0.000428691,-4.92347e-08,-8.44142e-08,1.06774,0.000428339,-3.02477e-07,4.37032e-08,1.06816,0.000427865,-1.71368e-07,2.88107e-08,1.06859,0.000427609,-8.49356e-08,-3.97367e-08,1.06902,0.00042732,-2.04146e-07,1.09267e-08,1.06945,0.000426945,-1.71365e-07,-3.97023e-09,1.06987,0.00042659,-1.83276e-07,4.9542e-09,1.0703,0.000426238,-1.68414e-07,-1.58466e-08,1.07073,0.000425854,-2.15953e-07,5.84321e-08,1.07115,0.000425597,-4.0657e-08,-9.86725e-08,1.07158,0.00042522,-3.36674e-07,9.78392e-08,1.072,0.00042484,-4.31568e-08,-5.42658e-08,1.07243,0.000424591,-2.05954e-07,1.45377e-11,1.07285,0.000424179,-2.0591e-07,5.42076e-08,1.07328,0.00042393,-4.32877e-08,-9.76357e-08,1.0737,0.00042355,-3.36195e-07,9.79165e-08,1.07412,0.000423172,-4.24451e-08,-5.56118e-08,1.07455,0.00042292,-2.09281e-07,5.32143e-09,1.07497,0.000422518,-1.93316e-07,3.43261e-08,1.07539,0.000422234,-9.0338e-08,-2.34165e-08,1.07581,0.000421983,-1.60588e-07,-5.98692e-08,1.07623,0.000421482,-3.40195e-07,1.43684e-07,1.07666,0.000421233,9.08574e-08,-1.5724e-07,1.07708,0.000420943,-3.80862e-07,1.27647e-07,1.0775,0.000420564,2.0791e-09,-1.1493e-07,1.07792,0.000420223,-3.4271e-07,9.36534e-08,1.07834,0.000419819,-6.17499e-08,-2.12653e-08,1.07876,0.000419632,-1.25546e-07,-8.59219e-09,1.07918,0.000419355,-1.51322e-07,-6.35752e-08,1.0796,0.000418861,-3.42048e-07,1.43684e-07,1.08002
,0.000418608,8.90034e-08,-1.53532e-07,1.08043,0.000418326,-3.71593e-07,1.12817e-07,1.08085,0.000417921,-3.31414e-08,-5.93184e-08,1.08127,0.000417677,-2.11097e-07,5.24697e-09,1.08169,0.00041727,-1.95356e-07,3.83305e-08,1.0821,0.000416995,-8.03642e-08,-3.93597e-08,1.08252,0.000416716,-1.98443e-07,-1.0094e-10,1.08294,0.000416319,-1.98746e-07,3.97635e-08,1.08335,0.00041604,-7.94557e-08,-3.97437e-08,1.08377,0.000415762,-1.98687e-07,1.94215e-12,1.08419,0.000415365,-1.98681e-07,3.97359e-08,1.0846,0.000415087,-7.94732e-08,-3.97362e-08,1.08502,0.000414809,-1.98682e-07,-4.31063e-13,1.08543,0.000414411,-1.98683e-07,3.97379e-08,1.08584,0.000414133,-7.94694e-08,-3.97418e-08,1.08626,0.000413855,-1.98695e-07,2.00563e-11,1.08667,0.000413458,-1.98635e-07,3.96616e-08,1.08709,0.000413179,-7.965e-08,-3.9457e-08,1.0875,0.000412902,-1.98021e-07,-1.04281e-09,1.08791,0.000412502,-2.01149e-07,4.36282e-08,1.08832,0.000412231,-7.02648e-08,-5.42608e-08,1.08874,0.000411928,-2.33047e-07,5.42057e-08,1.08915,0.000411624,-7.04301e-08,-4.33527e-08,1.08956,0.000411353,-2.00488e-07,-4.07378e-12,1.08997,0.000410952,-2.005e-07,4.3369e-08,1.09038,0.000410681,-7.03934e-08,-5.42627e-08,1.09079,0.000410378,-2.33182e-07,5.44726e-08,1.0912,0.000410075,-6.97637e-08,-4.44186e-08,1.09161,0.000409802,-2.03019e-07,3.99235e-09,1.09202,0.000409408,-1.91042e-07,2.84491e-08,1.09243,0.000409111,-1.05695e-07,1.42043e-09,1.09284,0.000408904,-1.01434e-07,-3.41308e-08,1.09325,0.000408599,-2.03826e-07,1.58937e-08,1.09366,0.000408239,-1.56145e-07,-2.94438e-08,1.09406,0.000407838,-2.44476e-07,1.01881e-07,1.09447,0.000407655,6.11676e-08,-1.39663e-07,1.09488,0.000407358,-3.57822e-07,9.91432e-08,1.09529,0.00040694,-6.03921e-08,-1.84912e-08,1.09569,0.000406764,-1.15866e-07,-2.51785e-08,1.0961,0.000406457,-1.91401e-07,-4.03115e-12,1.09651,0.000406074,-1.91413e-07,2.51947e-08,1.09691,0.000405767,-1.15829e-07,1.84346e-08,1.09732,0.00040559,-6.05254e-08,-9.89332e-08,1.09772,0.000405172,-3.57325e-07,1.3888e-07,1.09813,0.000404874,5.93136e-08,-9.8957e-08,1.09853,0.000404696,-2.37557e-07,1.853e-08,1.09894,0.000404277,-1.81968e-07,2.48372e-08,1.09934,0.000403987,-1.07456e-07,1.33047e-09,1.09975,0.000403776,-1.03465e-07,-3.01591e-08,1.10015,0.000403479,-1.93942e-07,9.66054e-11,1.10055,0.000403091,-1.93652e-07,2.97727e-08,1.10096,0.000402793,-1.04334e-07,2.19273e-11,1.10136,0.000402585,-1.04268e-07,-2.98604e-08,1.10176,0.000402287,-1.93849e-07,2.10325e-10,1.10216,0.0004019,-1.93218e-07,2.90191e-08,1.10256,0.0004016,-1.06161e-07,2.92264e-09,1.10297,0.000401397,-9.73931e-08,-4.07096e-08,1.10337,0.00040108,-2.19522e-07,4.07067e-08,1.10377,0.000400763,-9.7402e-08,-2.90783e-09,1.10417,0.000400559,-1.06126e-07,-2.90754e-08,1.10457,0.00040026,-1.93352e-07,9.00021e-14,1.10497,0.000399873,-1.93351e-07,2.9075e-08,1.10537,0.000399574,-1.06126e-07,2.90902e-09,1.10577,0.00039937,-9.73992e-08,-4.07111e-08,1.10617,0.000399053,-2.19533e-07,4.07262e-08,1.10657,0.000398736,-9.73541e-08,-2.98424e-09,1.10697,0.000398533,-1.06307e-07,-2.87892e-08,1.10736,0.000398234,-1.92674e-07,-1.06824e-09,1.10776,0.000397845,-1.95879e-07,3.30622e-08,1.10816,0.000397552,-9.66926e-08,-1.19712e-08,1.10856,0.000397323,-1.32606e-07,1.48225e-08,1.10895,0.000397102,-8.81387e-08,-4.73187e-08,1.10935,0.000396784,-2.30095e-07,5.52429e-08,1.10975,0.00039649,-6.4366e-08,-5.44437e-08,1.11014,0.000396198,-2.27697e-07,4.33226e-08,1.11054,0.000395872,-9.77293e-08,3.62656e-10,1.11094,0.000395678,-9.66414e-08,-4.47732e-08,1.11133,0.00039535,-2.30961e-07,5.95208e-08,1.11173,0.000395067,-5.23985e-08,-7.41008e-08,1.112
12,0.00039474,-2.74701e-07,1.17673e-07,1.11252,0.000394543,7.83181e-08,-1.58172e-07,1.11291,0.000394225,-3.96199e-07,1.57389e-07,1.1133,0.000393905,7.59679e-08,-1.13756e-07,1.1137,0.000393716,-2.653e-07,5.92165e-08,1.11409,0.000393363,-8.76507e-08,-3.90074e-09,1.11449,0.000393176,-9.93529e-08,-4.36136e-08,1.11488,0.000392846,-2.30194e-07,5.91457e-08,1.11527,0.000392563,-5.27564e-08,-7.376e-08,1.11566,0.000392237,-2.74037e-07,1.16685e-07,1.11606,0.000392039,7.60189e-08,-1.54562e-07,1.11645,0.000391727,-3.87667e-07,1.43935e-07,1.11684,0.000391384,4.4137e-08,-6.35487e-08,1.11723,0.000391281,-1.46509e-07,-8.94896e-09,1.11762,0.000390961,-1.73356e-07,-1.98647e-08,1.11801,0.000390555,-2.3295e-07,8.8408e-08,1.1184,0.000390354,3.22736e-08,-9.53486e-08,1.11879,0.000390133,-2.53772e-07,5.45677e-08,1.11918,0.000389789,-9.0069e-08,-3.71296e-09,1.11957,0.000389598,-1.01208e-07,-3.97159e-08,1.11996,0.000389276,-2.20355e-07,4.33671e-08,1.12035,0.000388966,-9.02542e-08,-1.45431e-08,1.12074,0.000388741,-1.33883e-07,1.48052e-08,1.12113,0.000388518,-8.94678e-08,-4.46778e-08,1.12152,0.000388205,-2.23501e-07,4.46966e-08,1.12191,0.000387892,-8.94114e-08,-1.48992e-08,1.12229,0.000387669,-1.34109e-07,1.49003e-08,1.12268,0.000387445,-8.94082e-08,-4.47019e-08,1.12307,0.000387132,-2.23514e-07,4.4698e-08,1.12345,0.000386819,-8.942e-08,-1.48806e-08,1.12384,0.000386596,-1.34062e-07,1.48245e-08,1.12423,0.000386372,-8.95885e-08,-4.44172e-08,1.12461,0.00038606,-2.2284e-07,4.36351e-08,1.125,0.000385745,-9.19348e-08,-1.09139e-08,1.12539,0.000385528,-1.24677e-07,2.05584e-11,1.12577,0.000385279,-1.24615e-07,1.08317e-08,1.12616,0.000385062,-9.21198e-08,-4.33473e-08,1.12654,0.000384748,-2.22162e-07,4.33481e-08,1.12693,0.000384434,-9.21174e-08,-1.08356e-08,1.12731,0.000384217,-1.24624e-07,-5.50907e-12,1.12769,0.000383968,-1.24641e-07,1.08577e-08,1.12808,0.000383751,-9.20679e-08,-4.34252e-08,1.12846,0.000383437,-2.22343e-07,4.36337e-08,1.12884,0.000383123,-9.14422e-08,-1.19005e-08,1.12923,0.000382904,-1.27144e-07,3.96813e-09,1.12961,0.000382662,-1.15239e-07,-3.97207e-09,1.12999,0.000382419,-1.27155e-07,1.19201e-08,1.13038,0.000382201,-9.1395e-08,-4.37085e-08,1.13076,0.000381887,-2.2252e-07,4.37046e-08,1.13114,0.000381573,-9.14068e-08,-1.19005e-08,1.13152,0.000381355,-1.27108e-07,3.89734e-09,1.1319,0.000381112,-1.15416e-07,-3.68887e-09,1.13228,0.00038087,-1.26483e-07,1.08582e-08,1.13266,0.00038065,-9.39083e-08,-3.97438e-08,1.13304,0.000380343,-2.1314e-07,2.89076e-08,1.13342,0.000380003,-1.26417e-07,4.33225e-08,1.1338,0.00037988,3.55072e-09,-8.29883e-08,1.13418,0.000379638,-2.45414e-07,5.0212e-08,1.13456,0.000379298,-9.47781e-08,1.34964e-09,1.13494,0.000379113,-9.07292e-08,-5.56105e-08,1.13532,0.000378764,-2.57561e-07,1.01883e-07,1.1357,0.000378555,4.80889e-08,-1.13504e-07,1.13608,0.000378311,-2.92423e-07,1.13713e-07,1.13646,0.000378067,4.87176e-08,-1.02931e-07,1.13683,0.000377856,-2.60076e-07,5.95923e-08,1.13721,0.000377514,-8.12988e-08,-1.62288e-08,1.13759,0.000377303,-1.29985e-07,5.32278e-09,1.13797,0.000377059,-1.14017e-07,-5.06237e-09,1.13834,0.000376816,-1.29204e-07,1.49267e-08,1.13872,0.000376602,-8.44237e-08,-5.46444e-08,1.1391,0.000376269,-2.48357e-07,8.44417e-08,1.13947,0.000376026,4.96815e-09,-4.47039e-08,1.13985,0.000375902,-1.29143e-07,-2.48355e-08,1.14023,0.000375569,-2.0365e-07,2.48368e-08,1.1406,0.000375236,-1.2914e-07,4.46977e-08,1.14098,0.000375112,4.95341e-09,-8.44184e-08,1.14135,0.000374869,-2.48302e-07,5.45572e-08,1.14173,0.000374536,-8.463e-08,-1.46013e-08,1.1421,0.000374323,-1.28434e-07,3.8478e-09,1.14247,0.00
0374077,-1.1689e-07,-7.89941e-10,1.14285,0.000373841,-1.1926e-07,-6.88042e-10,1.14322,0.0003736,-1.21324e-07,3.54213e-09,1.1436,0.000373368,-1.10698e-07,-1.34805e-08,1.14397,0.000373107,-1.51139e-07,5.03798e-08,1.14434,0.000372767,0.,0.};\n\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void RGB2LuvConvert_f(const T& src, D& dst)\n        {\n            const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3);\n            const float _un = 13 * (4 * 0.950456f * _d);\n            const float _vn = 13 * (9 * _d);\n\n            float B = blueIdx == 0 ? src.x : src.z;\n            float G = src.y;\n            float R = blueIdx == 0 ? src.z : src.x;\n\n            if (srgb)\n            {\n                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);\n                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);\n                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);\n            }\n\n            float X = R * 0.412453f + G * 0.357580f + B * 0.180423f;\n            float Y = R * 0.212671f + G * 0.715160f + B * 0.072169f;\n            float Z = R * 0.019334f + G * 0.119193f + B * 0.950227f;\n\n            float L = splineInterpolate(Y * (LAB_CBRT_TAB_SIZE / 1.5f), c_LabCbrtTab, LAB_CBRT_TAB_SIZE);\n            L = 116.f * L - 16.f;\n\n            const float d = (4 * 13) / ::fmaxf(X + 15 * Y + 3 * Z, numeric_limits<float>::epsilon());\n            float u = L * (X * d - _un);\n            float v = L * ((9 * 0.25f) * Y * d - _vn);\n\n            dst.x = L;\n            dst.y = u;\n            dst.z = v;\n        }\n\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void RGB2LuvConvert_b(const T& src, D& dst)\n        {\n            float3 srcf, dstf;\n\n            srcf.x = src.x * (1.f / 255.f);\n            srcf.y = src.y * (1.f / 255.f);\n            srcf.z = src.z * (1.f / 255.f);\n\n            RGB2LuvConvert_f<srgb, blueIdx>(srcf, dstf);\n\n            dst.x = saturate_cast<uchar>(dstf.x * 2.55f);\n            dst.y = saturate_cast<uchar>(dstf.y * 0.72033898305084743f + 96.525423728813564f);\n            dst.z = saturate_cast<uchar>(dstf.z * 0.9732824427480916f + 136.259541984732824f);\n        }\n\n        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct RGB2Luv;\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct RGB2Luv<uchar, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const\n            {\n                typename TypeVec<uchar, dcn>::vec_type dst;\n\n                RGB2LuvConvert_b<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2Luv() {}\n            __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}\n        };\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct RGB2Luv<float, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const\n            {\n   
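             // Float path: L, u and v stay unpacked (L in [0,100], u in [-134,220], v in [-140,122]);\n             // only the uchar specialisation above rescales to [0,255] via L*255/100, (u+134)*255/354 and (v+140)*255/262.\n   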
             typename TypeVec<float, dcn>::vec_type dst;\n\n                RGB2LuvConvert_f<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ RGB2Luv() {}\n            __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(name, scn, dcn, srgb, blueIdx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::RGB2Luv<T, scn, dcn, srgb, blueIdx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    namespace color_detail\n    {\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void Luv2RGBConvert_f(const T& src, D& dst)\n        {\n            const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3);\n            const float _un = 4 * 0.950456f * _d;\n            const float _vn = 9 * _d;\n\n            float L = src.x;\n            float u = src.y;\n            float v = src.z;\n\n            float Y = (L + 16.f) * (1.f / 116.f);\n            Y = Y * Y * Y;\n\n            float d = (1.f / 13.f) / L;\n            u = u * d + _un;\n            v = v * d + _vn;\n\n            float iv = 1.f / v;\n            float X = 2.25f * u * Y * iv;\n            float Z = (12 - 3 * u - 20 * v) * Y * 0.25f * iv;\n\n            float B = 0.055648f * X - 0.204043f * Y + 1.057311f * Z;\n            float G = -0.969256f * X + 1.875991f * Y + 0.041556f * Z;\n            float R = 3.240479f * X - 1.537150f * Y - 0.498535f * Z;\n\n            if (srgb)\n            {\n                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);\n                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);\n                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);\n            }\n\n            dst.x = blueIdx == 0 ? B : R;\n            dst.y = G;\n            dst.z = blueIdx == 0 ? 
R : B;\n            setAlpha(dst, ColorChannel<float>::max());\n        }\n\n        template <bool srgb, int blueIdx, typename T, typename D>\n        __device__ __forceinline__ void Luv2RGBConvert_b(const T& src, D& dst)\n        {\n            float3 srcf, dstf;\n\n            srcf.x = src.x * (100.f / 255.f);\n            srcf.y = src.y * 1.388235294117647f - 134.f;\n            srcf.z = src.z * 1.027450980392157f - 140.f;\n\n            Luv2RGBConvert_f<srgb, blueIdx>(srcf, dstf);\n\n            dst.x = saturate_cast<uchar>(dstf.x * 255.f);\n            dst.y = saturate_cast<uchar>(dstf.y * 255.f);\n            dst.z = saturate_cast<uchar>(dstf.z * 255.f);\n            setAlpha(dst, ColorChannel<uchar>::max());\n        }\n\n        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct Luv2RGB;\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct Luv2RGB<uchar, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const\n            {\n                typename TypeVec<uchar, dcn>::vec_type dst;\n\n                Luv2RGBConvert_b<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ Luv2RGB() {}\n            __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}\n        };\n        template <int scn, int dcn, bool srgb, int blueIdx>\n        struct Luv2RGB<float, scn, dcn, srgb, blueIdx>\n            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>\n        {\n            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const\n            {\n                typename TypeVec<float, dcn>::vec_type dst;\n\n                Luv2RGBConvert_f<srgb, blueIdx>(src, dst);\n\n                return dst;\n            }\n            __host__ __device__ __forceinline__ Luv2RGB() {}\n            __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}\n        };\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \\\n    template <typename T> struct name ## _traits \\\n    { \\\n        typedef ::cv::cuda::device::color_detail::Luv2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \\\n        static __host__ __device__ __forceinline__ functor_type create_functor() \\\n        { \\\n            return functor_type(); \\\n        } \\\n    };\n\n    #undef CV_DESCALE\n\n}}} // namespace cv { namespace cuda { namespace cudev\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_COLOR_DETAIL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/detail/reduce.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_REDUCE_DETAIL_HPP__\n#define __OPENCV_CUDA_REDUCE_DETAIL_HPP__\n\n#include <thrust/tuple.h>\n#include \"../warp.hpp\"\n#include \"../warp_shuffle.hpp\"\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    namespace reduce_detail\n    {\n        template <typename T> struct GetType;\n        template <typename T> struct GetType<T*>\n        {\n            typedef T type;\n        };\n        template <typename T> struct GetType<volatile T*>\n        {\n            typedef T type;\n        };\n        template <typename T> struct GetType<T&>\n        {\n            typedef T type;\n        };\n\n        template <unsigned int I, unsigned int N>\n        struct For\n        {\n            template <class PointerTuple, class ValTuple>\n            static __device__ void loadToSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)\n            {\n                thrust::get<I>(smem)[tid] = thrust::get<I>(val);\n\n                For<I + 1, N>::loadToSmem(smem, val, tid);\n            }\n            template <class PointerTuple, class ValTuple>\n            static __device__ void loadFromSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)\n            {\n                thrust::get<I>(val) = thrust::get<I>(smem)[tid];\n\n                For<I + 1, N>::loadFromSmem(smem, val, tid);\n            }\n\n            template <class PointerTuple, class ValTuple, class OpTuple>\n            static __device__ void merge(const PointerTuple& smem, const ValTuple& val, unsigned int tid, unsigned int delta, const OpTuple& op)\n            {\n                typename GetType<typename thrust::tuple_element<I, PointerTuple>::type>::type reg = thrust::get<I>(smem)[tid + delta];\n                thrust::get<I>(smem)[tid] = thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);\n\n                For<I + 1, N>::merge(smem, val, tid, delta, op);\n            }\n            template <class ValTuple, class OpTuple>\n            static __device__ void mergeShfl(const ValTuple& val, unsigned int delta, unsigned int width, const OpTuple& op)\n            {\n                typename GetType<typename thrust::tuple_element<I, ValTuple>::type>::type reg = shfl_down(thrust::get<I>(val), delta, width);\n                thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);\n\n                For<I + 1, N>::mergeShfl(val, delta, width, op);\n            }\n        };\n        template <unsigned int N>\n        struct For<N, N>\n        {\n            template <class PointerTuple, class ValTuple>\n            static __device__ void loadToSmem(const PointerTuple&, const ValTuple&, unsigned int)\n            {\n            }\n            template <class PointerTuple, class ValTuple>\n            static __device__ void loadFromSmem(const PointerTuple&, const ValTuple&, unsigned int)\n            {\n            }\n\n            template <class PointerTuple, class ValTuple, class OpTuple>\n            static __device__ void merge(const PointerTuple&, const ValTuple&, unsigned int, unsigned int, const OpTuple&)\n            {\n            }\n            template <class ValTuple, class OpTuple>\n            static __device__ void mergeShfl(const ValTuple&, unsigned int, unsigned int, const OpTuple&)\n            {\n            }\n        };\n\n        template <typename T>\n        __device__ __forceinline__ void loadToSmem(volatile T* smem, T& val, unsigned int tid)\n        {\n            smem[tid] = val;\n        }\n        template <typename T>\n        __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& val, unsigned int tid)\n        {\n            val = smem[tid];\n        }\n        
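// The tuple overloads below recurse through For<0, N> at compile time, so up to ten values\n        // (and their shared-memory arrays) are loaded and merged in lockstep for a single tid.\n        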
template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,\n                  typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>\n        __device__ __forceinline__ void loadToSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,\n                                                       const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,\n                                                       unsigned int tid)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadToSmem(smem, val, tid);\n        }\n        template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,\n                  typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>\n        __device__ __forceinline__ void loadFromSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,\n                                                         const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,\n                                                         unsigned int tid)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadFromSmem(smem, val, tid);\n        }\n\n        template <typename T, class Op>\n        __device__ __forceinline__ void merge(volatile T* smem, T& val, unsigned int tid, unsigned int delta, const Op& op)\n        {\n            T reg = smem[tid + delta];\n            smem[tid] = val = op(val, reg);\n        }\n        template <typename T, class Op>\n        __device__ __forceinline__ void mergeShfl(T& val, unsigned int delta, unsigned int width, const Op& op)\n        {\n            T reg = shfl_down(val, delta, width);\n            val = op(val, reg);\n        }\n        template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,\n                  typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,\n                  class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>\n        __device__ __forceinline__ void merge(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,\n                                              const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,\n                                              unsigned int tid,\n                                              unsigned int delta,\n                                              const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::merge(smem, val, tid, delta, op);\n        }\n        template <typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,\n                  class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>\n        __device__ __forceinline__ void mergeShfl(const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,\n                       
                           unsigned int delta,\n                                                  unsigned int width,\n                                                  const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9> >::value>::mergeShfl(val, delta, width, op);\n        }\n\n        template <unsigned int N> struct Generic\n        {\n            template <typename Pointer, typename Reference, class Op>\n            static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)\n            {\n                loadToSmem(smem, val, tid);\n                if (N >= 32)\n                    __syncthreads();\n\n                if (N >= 2048)\n                {\n                    if (tid < 1024)\n                        merge(smem, val, tid, 1024, op);\n\n                    __syncthreads();\n                }\n                if (N >= 1024)\n                {\n                    if (tid < 512)\n                        merge(smem, val, tid, 512, op);\n\n                    __syncthreads();\n                }\n                if (N >= 512)\n                {\n                    if (tid < 256)\n                        merge(smem, val, tid, 256, op);\n\n                    __syncthreads();\n                }\n                if (N >= 256)\n                {\n                    if (tid < 128)\n                        merge(smem, val, tid, 128, op);\n\n                    __syncthreads();\n                }\n                if (N >= 128)\n                {\n                    if (tid < 64)\n                        merge(smem, val, tid, 64, op);\n\n                    __syncthreads();\n                }\n                if (N >= 64)\n                {\n                    if (tid < 32)\n                        merge(smem, val, tid, 32, op);\n                }\n\n                if (tid < 16)\n                {\n                    merge(smem, val, tid, 16, op);\n                    merge(smem, val, tid, 8, op);\n                    merge(smem, val, tid, 4, op);\n                    merge(smem, val, tid, 2, op);\n                    merge(smem, val, tid, 1, op);\n                }\n            }\n        };\n\n        template <unsigned int I, typename Pointer, typename Reference, class Op>\n        struct Unroll\n        {\n            static __device__ void loopShfl(Reference val, Op op, unsigned int N)\n            {\n                mergeShfl(val, I, N, op);\n                Unroll<I / 2, Pointer, Reference, Op>::loopShfl(val, op, N);\n            }\n            static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op)\n            {\n                merge(smem, val, tid, I, op);\n                Unroll<I / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);\n            }\n        };\n        template <typename Pointer, typename Reference, class Op>\n        struct Unroll<0, Pointer, Reference, Op>\n        {\n            static __device__ void loopShfl(Reference, Op, unsigned int)\n            {\n            }\n            static __device__ void loop(Pointer, Reference, unsigned int, Op)\n            {\n            }\n        };\n\n        template <unsigned int N> struct WarpOptimized\n        {\n            template <typename Pointer, typename Reference, class Op>\n            static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)\n            {\n            #if __CUDA_ARCH__ >= 300\n  
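              // sm_30+ path: the warp-wide reduction runs entirely in registers via shfl_down;\n                // shared memory and tid are unused here, so the casts below just silence warnings.\n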
              (void) smem;\n                (void) tid;\n\n                Unroll<N / 2, Pointer, Reference, Op>::loopShfl(val, op, N);\n            #else\n                loadToSmem(smem, val, tid);\n\n                if (tid < N / 2)\n                    Unroll<N / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);\n            #endif\n            }\n        };\n\n        template <unsigned int N> struct GenericOptimized32\n        {\n            enum { M = N / 32 };\n\n            template <typename Pointer, typename Reference, class Op>\n            static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)\n            {\n                const unsigned int laneId = Warp::laneId();\n\n            #if __CUDA_ARCH__ >= 300\n                Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize);\n\n                if (laneId == 0)\n                    loadToSmem(smem, val, tid / 32);\n            #else\n                loadToSmem(smem, val, tid);\n\n                if (laneId < 16)\n                    Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op);\n\n                __syncthreads();\n\n                if (laneId == 0)\n                    loadToSmem(smem, val, tid / 32);\n            #endif\n\n                __syncthreads();\n\n                loadFromSmem(smem, val, tid);\n\n                if (tid < 32)\n                {\n                #if __CUDA_ARCH__ >= 300\n                    Unroll<M / 2, Pointer, Reference, Op>::loopShfl(val, op, M);\n                #else\n                    Unroll<M / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);\n                #endif\n                }\n            }\n        };\n\n        template <bool val, class T1, class T2> struct StaticIf;\n        template <class T1, class T2> struct StaticIf<true, T1, T2>\n        {\n            typedef T1 type;\n        };\n        template <class T1, class T2> struct StaticIf<false, T1, T2>\n        {\n            typedef T2 type;\n        };\n\n        template <unsigned int N> struct IsPowerOf2\n        {\n            enum { value = ((N != 0) && !(N & (N - 1))) };\n        };\n\n        template <unsigned int N> struct Dispatcher\n        {\n            typedef typename StaticIf<\n                (N <= 32) && IsPowerOf2<N>::value,\n                WarpOptimized<N>,\n                typename StaticIf<\n                    (N <= 1024) && IsPowerOf2<N>::value,\n                    GenericOptimized32<N>,\n                    Generic<N>\n                >::type\n            >::type reductor;\n        };\n    }\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_REDUCE_DETAIL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/detail/reduce_key_val.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP__\n#define __OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP__\n\n#include <thrust/tuple.h>\n#include \"../warp.hpp\"\n#include \"../warp_shuffle.hpp\"\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    namespace reduce_key_val_detail\n    {\n        template <typename T> struct GetType;\n        template <typename T> struct GetType<T*>\n        {\n            typedef T type;\n        };\n        template <typename T> struct GetType<volatile T*>\n        {\n            typedef T type;\n        };\n        template <typename T> struct GetType<T&>\n        {\n            typedef T type;\n        };\n\n        template <unsigned int I, unsigned int N>\n        struct For\n        {\n            template <class PointerTuple, class ReferenceTuple>\n            static __device__ void loadToSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)\n            {\n                thrust::get<I>(smem)[tid] = thrust::get<I>(data);\n\n                For<I + 1, N>::loadToSmem(smem, data, tid);\n            }\n            template <class PointerTuple, class ReferenceTuple>\n            static __device__ void loadFromSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)\n            {\n                thrust::get<I>(data) = thrust::get<I>(smem)[tid];\n\n                For<I + 1, N>::loadFromSmem(smem, data, tid);\n            }\n\n            template <class ReferenceTuple>\n            static __device__ void copyShfl(const ReferenceTuple& val, unsigned int delta, int width)\n            {\n                thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);\n\n                For<I + 1, N>::copyShfl(val, delta, width);\n            }\n            template <class PointerTuple, class ReferenceTuple>\n            static __device__ void copy(const PointerTuple& svals, const ReferenceTuple& val, unsigned int tid, unsigned int delta)\n            {\n                thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];\n\n                For<I + 1, N>::copy(svals, val, tid, delta);\n            }\n\n            template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>\n            static __device__ void mergeShfl(const KeyReferenceTuple& key, const ValReferenceTuple& val, const CmpTuple& cmp, unsigned int delta, int width)\n            {\n                typename GetType<typename thrust::tuple_element<I, KeyReferenceTuple>::type>::type reg = shfl_down(thrust::get<I>(key), delta, width);\n\n                if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))\n                {\n                    thrust::get<I>(key) = reg;\n                    thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);\n                }\n\n                For<I + 1, N>::mergeShfl(key, val, cmp, delta, width);\n            }\n            template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>\n            static __device__ void merge(const KeyPointerTuple& skeys, const KeyReferenceTuple& key,\n                                         const ValPointerTuple& svals, const ValReferenceTuple& val,\n                                         const CmpTuple& cmp,\n                                         unsigned int tid, unsigned int delta)\n            {\n                typename GetType<typename thrust::tuple_element<I, KeyPointerTuple>::type>::type reg = thrust::get<I>(skeys)[tid + delta];\n\n                if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))\n                {\n                    thrust::get<I>(skeys)[tid] = thrust::get<I>(key) = reg;\n                    
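// The key at tid+delta won the comparison, so move its value as well to keep (key, value) pairs aligned.\n                    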
thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];\n                }\n\n                For<I + 1, N>::merge(skeys, key, svals, val, cmp, tid, delta);\n            }\n        };\n        template <unsigned int N>\n        struct For<N, N>\n        {\n            template <class PointerTuple, class ReferenceTuple>\n            static __device__ void loadToSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)\n            {\n            }\n            template <class PointerTuple, class ReferenceTuple>\n            static __device__ void loadFromSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)\n            {\n            }\n\n            template <class ReferenceTuple>\n            static __device__ void copyShfl(const ReferenceTuple&, unsigned int, int)\n            {\n            }\n            template <class PointerTuple, class ReferenceTuple>\n            static __device__ void copy(const PointerTuple&, const ReferenceTuple&, unsigned int, unsigned int)\n            {\n            }\n\n            template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>\n            static __device__ void mergeShfl(const KeyReferenceTuple&, const ValReferenceTuple&, const CmpTuple&, unsigned int, int)\n            {\n            }\n            template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>\n            static __device__ void merge(const KeyPointerTuple&, const KeyReferenceTuple&,\n                                         const ValPointerTuple&, const ValReferenceTuple&,\n                                         const CmpTuple&,\n                                         unsigned int, unsigned int)\n            {\n            }\n        };\n\n        //////////////////////////////////////////////////////\n        // loadToSmem\n\n        template <typename T>\n        __device__ __forceinline__ void loadToSmem(volatile T* smem, T& data, unsigned int tid)\n        {\n            smem[tid] = data;\n        }\n        template <typename T>\n        __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& data, unsigned int tid)\n        {\n            data = smem[tid];\n        }\n        template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,\n                  typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>\n        __device__ __forceinline__ void loadToSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,\n                                                   const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,\n                                                   unsigned int tid)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadToSmem(smem, data, tid);\n        }\n        template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,\n                  typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>\n        __device__ __forceinline__ void loadFromSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,\n                                 
                    const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,\n                                                     unsigned int tid)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadFromSmem(smem, data, tid);\n        }\n\n        //////////////////////////////////////////////////////\n        // copyVals\n\n        template <typename V>\n        __device__ __forceinline__ void copyValsShfl(V& val, unsigned int delta, int width)\n        {\n            val = shfl_down(val, delta, width);\n        }\n        template <typename V>\n        __device__ __forceinline__ void copyVals(volatile V* svals, V& val, unsigned int tid, unsigned int delta)\n        {\n            svals[tid] = val = svals[tid + delta];\n        }\n        template <typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>\n        __device__ __forceinline__ void copyValsShfl(const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,\n                                                     unsigned int delta,\n                                                     int width)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9> >::value>::copyShfl(val, delta, width);\n        }\n        template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,\n                  typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>\n        __device__ __forceinline__ void copyVals(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,\n                                                 const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,\n                                                 unsigned int tid, unsigned int delta)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::copy(svals, val, tid, delta);\n        }\n\n        //////////////////////////////////////////////////////\n        // merge\n\n        template <typename K, typename V, class Cmp>\n        __device__ __forceinline__ void mergeShfl(K& key, V& val, const Cmp& cmp, unsigned int delta, int width)\n        {\n            K reg = shfl_down(key, delta, width);\n\n            if (cmp(reg, key))\n            {\n                key = reg;\n                copyValsShfl(val, delta, width);\n            }\n        }\n        template <typename K, typename V, class Cmp>\n        __device__ __forceinline__ void merge(volatile K* skeys, K& key, volatile V* svals, V& val, const Cmp& cmp, unsigned int tid, unsigned int delta)\n        {\n            K reg = skeys[tid + delta];\n\n            if (cmp(reg, key))\n            {\n                skeys[tid] = key = reg;\n                copyVals(svals, val, tid, delta);\n            }\n        }\n        template <typename K,\n                  typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,\n                  class Cmp>\n        __device__ __forceinline__ void mergeShfl(K& key,\n                                                  const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, 
VR9>& val,\n                                                  const Cmp& cmp,\n                                                  unsigned int delta, int width)\n        {\n            K reg = shfl_down(key, delta, width);\n\n            if (cmp(reg, key))\n            {\n                key = reg;\n                copyValsShfl(val, delta, width);\n            }\n        }\n        template <typename K,\n                  typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,\n                  typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,\n                  class Cmp>\n        __device__ __forceinline__ void merge(volatile K* skeys, K& key,\n                                              const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,\n                                              const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,\n                                              const Cmp& cmp, unsigned int tid, unsigned int delta)\n        {\n            K reg = skeys[tid + delta];\n\n            if (cmp(reg, key))\n            {\n                skeys[tid] = key = reg;\n                copyVals(svals, val, tid, delta);\n            }\n        }\n        template <typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,\n                  typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,\n                  class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>\n        __device__ __forceinline__ void mergeShfl(const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,\n                                                  const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,\n                                                  const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,\n                                                  unsigned int delta, int width)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9> >::value>::mergeShfl(key, val, cmp, delta, width);\n        }\n        template <typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,\n                  typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,\n                  typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,\n                  typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,\n                  class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>\n        __device__ __forceinline__ void merge(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,\n                                              const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,\n                             
                 const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,\n                                              const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,\n                                              const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,\n                                              unsigned int tid, unsigned int delta)\n        {\n            For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::merge(skeys, key, svals, val, cmp, tid, delta);\n        }\n\n        //////////////////////////////////////////////////////\n        // Generic\n\n        template <unsigned int N> struct Generic\n        {\n            template <class KP, class KR, class VP, class VR, class Cmp>\n            static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)\n            {\n                loadToSmem(skeys, key, tid);\n                loadToSmem(svals, val, tid);\n                if (N >= 32)\n                    __syncthreads();\n\n                if (N >= 2048)\n                {\n                    if (tid < 1024)\n                        merge(skeys, key, svals, val, cmp, tid, 1024);\n\n                    __syncthreads();\n                }\n                if (N >= 1024)\n                {\n                    if (tid < 512)\n                        merge(skeys, key, svals, val, cmp, tid, 512);\n\n                    __syncthreads();\n                }\n                if (N >= 512)\n                {\n                    if (tid < 256)\n                        merge(skeys, key, svals, val, cmp, tid, 256);\n\n                    __syncthreads();\n                }\n                if (N >= 256)\n                {\n                    if (tid < 128)\n                        merge(skeys, key, svals, val, cmp, tid, 128);\n\n                    __syncthreads();\n                }\n                if (N >= 128)\n                {\n                    if (tid < 64)\n                        merge(skeys, key, svals, val, cmp, tid, 64);\n\n                    __syncthreads();\n                }\n                if (N >= 64)\n                {\n                    if (tid < 32)\n                        merge(skeys, key, svals, val, cmp, tid, 32);\n                }\n\n                if (tid < 16)\n                {\n                    merge(skeys, key, svals, val, cmp, tid, 16);\n                    merge(skeys, key, svals, val, cmp, tid, 8);\n                    merge(skeys, key, svals, val, cmp, tid, 4);\n                    merge(skeys, key, svals, val, cmp, tid, 2);\n                    merge(skeys, key, svals, val, cmp, tid, 1);\n                }\n            }\n        };\n\n        template <unsigned int I, class KP, class KR, class VP, class VR, class Cmp>\n        struct Unroll\n        {\n            static __device__ void loopShfl(KR key, VR val, Cmp cmp, unsigned int N)\n            {\n                mergeShfl(key, val, cmp, I, N);\n                Unroll<I / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);\n            }\n            static __device__ void loop(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)\n            {\n                merge(skeys, key, svals, val, cmp, tid, I);\n                Unroll<I / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);\n            }\n        };\n        template <class KP, class KR, class VP, class VR, class Cmp>\n       
 struct Unroll<0, KP, KR, VP, VR, Cmp>\n        {\n            static __device__ void loopShfl(KR, VR, Cmp, unsigned int)\n            {\n            }\n            static __device__ void loop(KP, KR, VP, VR, unsigned int, Cmp)\n            {\n            }\n        };\n\n        template <unsigned int N> struct WarpOptimized\n        {\n            template <class KP, class KR, class VP, class VR, class Cmp>\n            static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)\n            {\n            #if 0 // __CUDA_ARCH__ >= 300\n                (void) skeys;\n                (void) svals;\n                (void) tid;\n\n                Unroll<N / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);\n            #else\n                loadToSmem(skeys, key, tid);\n                loadToSmem(svals, val, tid);\n\n                if (tid < N / 2)\n                    Unroll<N / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);\n            #endif\n            }\n        };\n\n        template <unsigned int N> struct GenericOptimized32\n        {\n            enum { M = N / 32 };\n\n            template <class KP, class KR, class VP, class VR, class Cmp>\n            static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)\n            {\n                const unsigned int laneId = Warp::laneId();\n\n            #if 0 // __CUDA_ARCH__ >= 300\n                Unroll<16, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, warpSize);\n\n                if (laneId == 0)\n                {\n                    loadToSmem(skeys, key, tid / 32);\n                    loadToSmem(svals, val, tid / 32);\n                }\n            #else\n                loadToSmem(skeys, key, tid);\n                loadToSmem(svals, val, tid);\n\n                if (laneId < 16)\n                    Unroll<16, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);\n\n                __syncthreads();\n\n                if (laneId == 0)\n                {\n                    loadToSmem(skeys, key, tid / 32);\n                    loadToSmem(svals, val, tid / 32);\n                }\n            #endif\n\n                __syncthreads();\n\n                loadFromSmem(skeys, key, tid);\n\n                if (tid < 32)\n                {\n                #if 0 // __CUDA_ARCH__ >= 300\n                    loadFromSmem(svals, val, tid);\n\n                    Unroll<M / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, M);\n                #else\n                    Unroll<M / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);\n                #endif\n                }\n            }\n        };\n\n        template <bool val, class T1, class T2> struct StaticIf;\n        template <class T1, class T2> struct StaticIf<true, T1, T2>\n        {\n            typedef T1 type;\n        };\n        template <class T1, class T2> struct StaticIf<false, T1, T2>\n        {\n            typedef T2 type;\n        };\n\n        template <unsigned int N> struct IsPowerOf2\n        {\n            enum { value = ((N != 0) && !(N & (N - 1))) };\n        };\n\n        template <unsigned int N> struct Dispatcher\n        {\n            typedef typename StaticIf<\n                (N <= 32) && IsPowerOf2<N>::value,\n                WarpOptimized<N>,\n                typename StaticIf<\n                    (N <= 1024) && IsPowerOf2<N>::value,\n                    GenericOptimized32<N>,\n                    Generic<N>\n                
>::type\n            >::type reductor;\n        };\n    }\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/detail/transform_detail.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_TRANSFORM_DETAIL_HPP__\n#define __OPENCV_CUDA_TRANSFORM_DETAIL_HPP__\n\n#include \"../common.hpp\"\n#include \"../vec_traits.hpp\"\n#include \"../functional.hpp\"\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    namespace transform_detail\n    {\n        //! Read Write Traits\n\n        template <typename T, typename D, int shift> struct UnaryReadWriteTraits\n        {\n            typedef typename TypeVec<T, shift>::vec_type read_type;\n            typedef typename TypeVec<D, shift>::vec_type write_type;\n        };\n\n        template <typename T1, typename T2, typename D, int shift> struct BinaryReadWriteTraits\n        {\n            typedef typename TypeVec<T1, shift>::vec_type read_type1;\n            typedef typename TypeVec<T2, shift>::vec_type read_type2;\n            typedef typename TypeVec<D, shift>::vec_type write_type;\n        };\n\n        //! 
Transform kernels\n\n        template <int shift> struct OpUnroller;\n        template <> struct OpUnroller<1>\n        {\n            template <typename T, typename D, typename UnOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src.x);\n            }\n\n            template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src1.x, src2.x);\n            }\n        };\n        template <> struct OpUnroller<2>\n        {\n            template <typename T, typename D, typename UnOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src.x);\n                if (mask(y, x_shifted + 1))\n                    dst.y = op(src.y);\n            }\n\n            template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src1.x, src2.x);\n                if (mask(y, x_shifted + 1))\n                    dst.y = op(src1.y, src2.y);\n            }\n        };\n        template <> struct OpUnroller<3>\n        {\n            template <typename T, typename D, typename UnOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src.x);\n                if (mask(y, x_shifted + 1))\n                    dst.y = op(src.y);\n                if (mask(y, x_shifted + 2))\n                    dst.z = op(src.z);\n            }\n\n            template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src1.x, src2.x);\n                if (mask(y, x_shifted + 1))\n                    dst.y = op(src1.y, src2.y);\n                if (mask(y, x_shifted + 2))\n                    dst.z = op(src1.z, src2.z);\n            }\n        };\n        template <> struct OpUnroller<4>\n        {\n            template <typename T, typename D, typename UnOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src.x);\n                if (mask(y, x_shifted + 1))\n                    dst.y = op(src.y);\n                if (mask(y, x_shifted + 2))\n                    dst.z = op(src.z);\n                if (mask(y, x_shifted + 3))\n                    dst.w = op(src.w);\n            }\n\n            template <typename T1, typename T2, typename 
D, typename BinOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.x = op(src1.x, src2.x);\n                if (mask(y, x_shifted + 1))\n                    dst.y = op(src1.y, src2.y);\n                if (mask(y, x_shifted + 2))\n                    dst.z = op(src1.z, src2.z);\n                if (mask(y, x_shifted + 3))\n                    dst.w = op(src1.w, src2.w);\n            }\n        };\n        template <> struct OpUnroller<8>\n        {\n            template <typename T, typename D, typename UnOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.a0 = op(src.a0);\n                if (mask(y, x_shifted + 1))\n                    dst.a1 = op(src.a1);\n                if (mask(y, x_shifted + 2))\n                    dst.a2 = op(src.a2);\n                if (mask(y, x_shifted + 3))\n                    dst.a3 = op(src.a3);\n                if (mask(y, x_shifted + 4))\n                    dst.a4 = op(src.a4);\n                if (mask(y, x_shifted + 5))\n                    dst.a5 = op(src.a5);\n                if (mask(y, x_shifted + 6))\n                    dst.a6 = op(src.a6);\n                if (mask(y, x_shifted + 7))\n                    dst.a7 = op(src.a7);\n            }\n\n            template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n            static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)\n            {\n                if (mask(y, x_shifted))\n                    dst.a0 = op(src1.a0, src2.a0);\n                if (mask(y, x_shifted + 1))\n                    dst.a1 = op(src1.a1, src2.a1);\n                if (mask(y, x_shifted + 2))\n                    dst.a2 = op(src1.a2, src2.a2);\n                if (mask(y, x_shifted + 3))\n                    dst.a3 = op(src1.a3, src2.a3);\n                if (mask(y, x_shifted + 4))\n                    dst.a4 = op(src1.a4, src2.a4);\n                if (mask(y, x_shifted + 5))\n                    dst.a5 = op(src1.a5, src2.a5);\n                if (mask(y, x_shifted + 6))\n                    dst.a6 = op(src1.a6, src2.a6);\n                if (mask(y, x_shifted + 7))\n                    dst.a7 = op(src1.a7, src2.a7);\n            }\n        };\n\n        template <typename T, typename D, typename UnOp, typename Mask>\n        static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)\n        {\n            typedef TransformFunctorTraits<UnOp> ft;\n            typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;\n            typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::write_type write_type;\n\n            const int x = threadIdx.x + blockIdx.x * blockDim.x;\n            const int y = threadIdx.y + blockIdx.y * blockDim.y;\n            const int x_shifted = x * ft::smart_shift;\n\n            if (y < src_.rows)\n            {\n                const T* src = src_.ptr(y);\n                D* dst = dst_.ptr(y);\n\n                if (x_shifted + ft::smart_shift - 1 < src_.cols)\n                {\n                    const 
read_type src_n_el = ((const read_type*)src)[x];\n                    write_type dst_n_el = ((const write_type*)dst)[x];\n\n                    OpUnroller<ft::smart_shift>::unroll(src_n_el, dst_n_el, mask, op, x_shifted, y);\n\n                    ((write_type*)dst)[x] = dst_n_el;\n                }\n                else\n                {\n                    for (int real_x = x_shifted; real_x < src_.cols; ++real_x)\n                    {\n                        if (mask(y, real_x))\n                            dst[real_x] = op(src[real_x]);\n                    }\n                }\n            }\n        }\n\n        template <typename T, typename D, typename UnOp, typename Mask>\n        __global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)\n        {\n            const int x = blockDim.x * blockIdx.x + threadIdx.x;\n            const int y = blockDim.y * blockIdx.y + threadIdx.y;\n\n            if (x < src.cols && y < src.rows && mask(y, x))\n            {\n                dst.ptr(y)[x] = op(src.ptr(y)[x]);\n            }\n        }\n\n        template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n        static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,\n            const Mask mask, const BinOp op)\n        {\n            typedef TransformFunctorTraits<BinOp> ft;\n            typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type1 read_type1;\n            typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type2 read_type2;\n            typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::write_type write_type;\n\n            const int x = threadIdx.x + blockIdx.x * blockDim.x;\n            const int y = threadIdx.y + blockIdx.y * blockDim.y;\n            const int x_shifted = x * ft::smart_shift;\n\n            if (y < src1_.rows)\n            {\n                const T1* src1 = src1_.ptr(y);\n                const T2* src2 = src2_.ptr(y);\n                D* dst = dst_.ptr(y);\n\n                if (x_shifted + ft::smart_shift - 1 < src1_.cols)\n                {\n                    const read_type1 src1_n_el = ((const read_type1*)src1)[x];\n                    const read_type2 src2_n_el = ((const read_type2*)src2)[x];\n                    write_type dst_n_el = ((const write_type*)dst)[x];\n\n                    OpUnroller<ft::smart_shift>::unroll(src1_n_el, src2_n_el, dst_n_el, mask, op, x_shifted, y);\n\n                    ((write_type*)dst)[x] = dst_n_el;\n                }\n                else\n                {\n                    for (int real_x = x_shifted; real_x < src1_.cols; ++real_x)\n                    {\n                        if (mask(y, real_x))\n                            dst[real_x] = op(src1[real_x], src2[real_x]);\n                    }\n                }\n            }\n        }\n\n        template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n        static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,\n            const Mask mask, const BinOp op)\n        {\n            const int x = blockDim.x * blockIdx.x + threadIdx.x;\n            const int y = blockDim.y * blockIdx.y + threadIdx.y;\n\n            if (x < src1.cols && y < src1.rows && mask(y, x))\n            {\n                const T1 src1_data = src1.ptr(y)[x];\n                const T2 src2_data = src2.ptr(y)[x];\n         
       dst.ptr(y)[x] = op(src1_data, src2_data);\n            }\n        }\n\n        template <bool UseSmart> struct TransformDispatcher;\n        template<> struct TransformDispatcher<false>\n        {\n            template <typename T, typename D, typename UnOp, typename Mask>\n            static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)\n            {\n                typedef TransformFunctorTraits<UnOp> ft;\n\n                const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);\n                const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1);\n\n                transformSimple<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);\n                cudaSafeCall( cudaGetLastError() );\n\n                if (stream == 0)\n                    cudaSafeCall( cudaDeviceSynchronize() );\n            }\n\n            template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n            static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)\n            {\n                typedef TransformFunctorTraits<BinOp> ft;\n\n                const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);\n                const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1);\n\n                transformSimple<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);\n                cudaSafeCall( cudaGetLastError() );\n\n                if (stream == 0)\n                    cudaSafeCall( cudaDeviceSynchronize() );\n            }\n        };\n        template<> struct TransformDispatcher<true>\n        {\n            template <typename T, typename D, typename UnOp, typename Mask>\n            static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)\n            {\n                typedef TransformFunctorTraits<UnOp> ft;\n\n                CV_StaticAssert(ft::smart_shift != 1, \"\");\n\n                if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) ||\n                    !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))\n                {\n                    TransformDispatcher<false>::call(src, dst, op, mask, stream);\n                    return;\n                }\n\n                const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);\n                const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1);\n\n                transformSmart<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);\n                cudaSafeCall( cudaGetLastError() );\n\n                if (stream == 0)\n                    cudaSafeCall( cudaDeviceSynchronize() );\n            }\n\n            template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n            static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)\n            {\n                typedef TransformFunctorTraits<BinOp> ft;\n\n                CV_StaticAssert(ft::smart_shift != 1, \"\");\n\n                if (!isAligned(src1.data, ft::smart_shift * sizeof(T1)) || !isAligned(src1.step, ft::smart_shift * sizeof(T1)) ||\n                    !isAligned(src2.data, ft::smart_shift * sizeof(T2)) || !isAligned(src2.step, ft::smart_shift * sizeof(T2)) ||\n                
    !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))\n                {\n                    TransformDispatcher<false>::call(src1, src2, dst, op, mask, stream);\n                    return;\n                }\n\n                const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);\n                const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1);\n\n                transformSmart<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);\n                cudaSafeCall( cudaGetLastError() );\n\n                if (stream == 0)\n                    cudaSafeCall( cudaDeviceSynchronize() );\n            }\n        };\n    } // namespace transform_detail\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_TRANSFORM_DETAIL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/detail/type_traits_detail.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP__\n#define __OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP__\n\n#include \"../common.hpp\"\n#include \"../vec_traits.hpp\"\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    namespace type_traits_detail\n    {\n        template <bool, typename T1, typename T2> struct Select { typedef T1 type; };\n        template <typename T1, typename T2> struct Select<false, T1, T2> { typedef T2 type; };\n\n        template <typename T> struct IsSignedIntergral { enum {value = 0}; };\n        template <> struct IsSignedIntergral<schar> { enum {value = 1}; };\n        template <> struct IsSignedIntergral<char1> { enum {value = 1}; };\n        template <> struct IsSignedIntergral<short> { enum {value = 1}; };\n        template <> struct IsSignedIntergral<short1> { enum {value = 1}; };\n        template <> struct IsSignedIntergral<int> { enum {value = 1}; };\n        template <> struct IsSignedIntergral<int1> { enum {value = 1}; };\n\n        template <typename T> struct IsUnsignedIntegral { enum {value = 0}; };\n        template <> struct IsUnsignedIntegral<uchar> { enum {value = 1}; };\n        template <> struct IsUnsignedIntegral<uchar1> { enum {value = 1}; };\n        template <> struct IsUnsignedIntegral<ushort> { enum {value = 1}; };\n        template <> struct IsUnsignedIntegral<ushort1> { enum {value = 1}; };\n        template <> struct IsUnsignedIntegral<uint> { enum {value = 1}; };\n        template <> struct IsUnsignedIntegral<uint1> { enum {value = 1}; };\n\n        template <typename T> struct IsIntegral { enum {value = IsSignedIntergral<T>::value || IsUnsignedIntegral<T>::value}; };\n        template <> struct IsIntegral<char> { enum {value = 1}; };\n        template <> struct IsIntegral<bool> { enum {value = 1}; };\n\n        template <typename T> struct IsFloat { enum {value = 0}; };\n        template <> struct IsFloat<float> { enum {value = 1}; };\n        template <> struct IsFloat<double> { enum {value = 1}; };\n\n        template <typename T> struct IsVec { enum {value = 0}; };\n        template <> struct IsVec<uchar1> { enum {value = 1}; };\n        template <> struct IsVec<uchar2> { enum {value = 1}; };\n        template <> struct IsVec<uchar3> { enum {value = 1}; };\n        template <> struct IsVec<uchar4> { enum {value = 1}; };\n        template <> struct IsVec<uchar8> { enum {value = 1}; };\n        template <> struct IsVec<char1> { enum {value = 1}; };\n        template <> struct IsVec<char2> { enum {value = 1}; };\n        template <> struct IsVec<char3> { enum {value = 1}; };\n        template <> struct IsVec<char4> { enum {value = 1}; };\n        template <> struct IsVec<char8> { enum {value = 1}; };\n        template <> struct IsVec<ushort1> { enum {value = 1}; };\n        template <> struct IsVec<ushort2> { enum {value = 1}; };\n        template <> struct IsVec<ushort3> { enum {value = 1}; };\n        template <> struct IsVec<ushort4> { enum {value = 1}; };\n        template <> struct IsVec<ushort8> { enum {value = 1}; };\n        template <> struct IsVec<short1> { enum {value = 1}; };\n        template <> struct IsVec<short2> { enum {value = 1}; };\n        template <> struct IsVec<short3> { enum {value = 1}; };\n        template <> struct IsVec<short4> { enum {value = 1}; };\n        template <> struct IsVec<short8> { enum {value = 1}; };\n        template <> struct IsVec<uint1> { enum {value = 1}; };\n        template <> struct IsVec<uint2> { enum {value = 1}; };\n        template <> struct IsVec<uint3> { enum {value = 1}; };\n        template <> struct IsVec<uint4> { enum {value = 1}; };\n        template <> struct IsVec<uint8> { enum {value = 1}; };\n        template 
<> struct IsVec<int1> { enum {value = 1}; };\n        template <> struct IsVec<int2> { enum {value = 1}; };\n        template <> struct IsVec<int3> { enum {value = 1}; };\n        template <> struct IsVec<int4> { enum {value = 1}; };\n        template <> struct IsVec<int8> { enum {value = 1}; };\n        template <> struct IsVec<float1> { enum {value = 1}; };\n        template <> struct IsVec<float2> { enum {value = 1}; };\n        template <> struct IsVec<float3> { enum {value = 1}; };\n        template <> struct IsVec<float4> { enum {value = 1}; };\n        template <> struct IsVec<float8> { enum {value = 1}; };\n        template <> struct IsVec<double1> { enum {value = 1}; };\n        template <> struct IsVec<double2> { enum {value = 1}; };\n        template <> struct IsVec<double3> { enum {value = 1}; };\n        template <> struct IsVec<double4> { enum {value = 1}; };\n        template <> struct IsVec<double8> { enum {value = 1}; };\n\n        template <class U> struct AddParameterType { typedef const U& type; };\n        template <class U> struct AddParameterType<U&> { typedef U& type; };\n        template <> struct AddParameterType<void> { typedef void type; };\n\n        template <class U> struct ReferenceTraits\n        {\n            enum { value = false };\n            typedef U type;\n        };\n        template <class U> struct ReferenceTraits<U&>\n        {\n            enum { value = true };\n            typedef U type;\n        };\n\n        template <class U> struct PointerTraits\n        {\n            enum { value = false };\n            typedef void type;\n        };\n        template <class U> struct PointerTraits<U*>\n        {\n            enum { value = true };\n            typedef U type;\n        };\n        template <class U> struct PointerTraits<U*&>\n        {\n            enum { value = true };\n            typedef U type;\n        };\n\n        template <class U> struct UnConst\n        {\n            typedef U type;\n            enum { value = 0 };\n        };\n        template <class U> struct UnConst<const U>\n        {\n            typedef U type;\n            enum { value = 1 };\n        };\n        template <class U> struct UnConst<const U&>\n        {\n            typedef U& type;\n            enum { value = 1 };\n        };\n\n        template <class U> struct UnVolatile\n        {\n            typedef U type;\n            enum { value = 0 };\n        };\n        template <class U> struct UnVolatile<volatile U>\n        {\n            typedef U type;\n            enum { value = 1 };\n        };\n        template <class U> struct UnVolatile<volatile U&>\n        {\n            typedef U& type;\n            enum { value = 1 };\n        };\n    } // namespace type_traits_detail\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/detail/vec_distance_detail.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP__\n#define __OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP__\n\n#include \"../datamov_utils.hpp\"\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    namespace vec_distance_detail\n    {\n        template <int THREAD_DIM, int N> struct UnrollVecDiffCached\n        {\n            template <typename Dist, typename T1, typename T2>\n            static __device__ void calcCheck(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int ind)\n            {\n                if (ind < len)\n                {\n                    T1 val1 = *vecCached++;\n\n                    T2 val2;\n                    ForceGlob<T2>::Load(vecGlob, ind, val2);\n\n                    dist.reduceIter(val1, val2);\n\n                    UnrollVecDiffCached<THREAD_DIM, N - 1>::calcCheck(vecCached, vecGlob, len, dist, ind + THREAD_DIM);\n                }\n            }\n\n            template <typename Dist, typename T1, typename T2>\n            static __device__ void calcWithoutCheck(const T1* vecCached, const T2* vecGlob, Dist& dist)\n            {\n                T1 val1 = *vecCached++;\n\n                T2 val2;\n                ForceGlob<T2>::Load(vecGlob, 0, val2);\n                vecGlob += THREAD_DIM;\n\n                dist.reduceIter(val1, val2);\n\n                UnrollVecDiffCached<THREAD_DIM, N - 1>::calcWithoutCheck(vecCached, vecGlob, dist);\n            }\n        };\n        template <int THREAD_DIM> struct UnrollVecDiffCached<THREAD_DIM, 0>\n        {\n            template <typename Dist, typename T1, typename T2>\n            static __device__ __forceinline__ void calcCheck(const T1*, const T2*, int, Dist&, int)\n            {\n            }\n\n            template <typename Dist, typename T1, typename T2>\n            static __device__ __forceinline__ void calcWithoutCheck(const T1*, const T2*, Dist&)\n            {\n            }\n        };\n\n        template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN> struct VecDiffCachedCalculator;\n        template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, false>\n        {\n            template <typename Dist, typename T1, typename T2>\n            static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)\n            {\n                UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcCheck(vecCached, vecGlob, len, dist, tid);\n            }\n        };\n        template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, true>\n        {\n            template <typename Dist, typename T1, typename T2>\n            static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)\n            {\n                UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcWithoutCheck(vecCached, vecGlob + tid, dist);\n            }\n        };\n    } // namespace vec_distance_detail\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/dynamic_smem.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_DYNAMIC_SMEM_HPP__\n#define __OPENCV_CUDA_DYNAMIC_SMEM_HPP__\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template<class T> struct DynamicSharedMem\n    {\n        __device__ __forceinline__ operator T*()\n        {\n            extern __shared__ int __smem[];\n            return (T*)__smem;\n        }\n\n        __device__ __forceinline__ operator const T*() const\n        {\n            extern __shared__ int __smem[];\n            return (T*)__smem;\n        }\n    };\n\n    // specialize for double to avoid unaligned memory access compile errors\n    template<> struct DynamicSharedMem<double>\n    {\n        __device__ __forceinline__ operator double*()\n        {\n            extern __shared__ double __smem_d[];\n            return (double*)__smem_d;\n        }\n\n        __device__ __forceinline__ operator const double*() const\n        {\n            extern __shared__ double __smem_d[];\n            return (double*)__smem_d;\n        }\n    };\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_DYNAMIC_SMEM_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/emulation.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_CUDA_EMULATION_HPP_\n#define OPENCV_CUDA_EMULATION_HPP_\n\n#include \"common.hpp\"\n#include \"warp_reduce.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    struct Emulation\n    {\n\n        static __device__ __forceinline__ int syncthreadsOr(int pred)\n        {\n#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)\n                // just campilation stab\n                return 0;\n#else\n                return __syncthreads_or(pred);\n#endif\n        }\n\n        template<int CTA_SIZE>\n        static __forceinline__ __device__ int Ballot(int predicate)\n        {\n#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)\n            return __ballot(predicate);\n#else\n            __shared__ volatile int cta_buffer[CTA_SIZE];\n\n            int tid = threadIdx.x;\n            cta_buffer[tid] = predicate ? 
(1 << (tid & 31)) : 0;\n            return warp_reduce(cta_buffer);\n#endif\n        }\n\n        struct smem\n        {\n            enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U };\n\n            template<typename T>\n            static __device__ __forceinline__ T atomicInc(T* address, T val)\n            {\n#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)\n                T count;\n                unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);\n                do\n                {\n                    count = *address & TAG_MASK;\n                    count = tag | (count + 1);\n                    *address = count;\n                } while (*address != count);\n\n                return (count & TAG_MASK) - 1;\n#else\n                return ::atomicInc(address, val);\n#endif\n            }\n\n            template<typename T>\n            static __device__ __forceinline__ T atomicAdd(T* address, T val)\n            {\n#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)\n                T count;\n                unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);\n                do\n                {\n                    count = *address & TAG_MASK;\n                    count = tag | (count + val);\n                    *address = count;\n                } while (*address != count);\n\n                return (count & TAG_MASK) - val;\n#else\n                return ::atomicAdd(address, val);\n#endif\n            }\n\n            template<typename T>\n            static __device__ __forceinline__ T atomicMin(T* address, T val)\n            {\n#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)\n                T count = ::min(*address, val);\n                do\n                {\n                    *address = count;\n                } while (*address > count);\n\n                return count;\n#else\n                return ::atomicMin(address, val);\n#endif\n            }\n        }; // struct smem\n\n        struct glob\n        {\n            static __device__ __forceinline__ int atomicAdd(int* address, int val)\n            {\n                return ::atomicAdd(address, val);\n            }\n            static __device__ __forceinline__ unsigned int atomicAdd(unsigned int* address, unsigned int val)\n            {\n                return ::atomicAdd(address, val);\n            }\n            static __device__ __forceinline__ float atomicAdd(float* address, float val)\n            {\n            #if __CUDA_ARCH__ >= 200\n                return ::atomicAdd(address, val);\n            #else\n                int* address_as_i = (int*) address;\n                int old = *address_as_i, assumed;\n                do {\n                    assumed = old;\n                    old = ::atomicCAS(address_as_i, assumed,\n                        __float_as_int(val + __int_as_float(assumed)));\n                } while (assumed != old);\n                return __int_as_float(old);\n            #endif\n            }\n            static __device__ __forceinline__ double atomicAdd(double* address, double val)\n            {\n            #if __CUDA_ARCH__ >= 130\n                unsigned long long int* address_as_ull = (unsigned long long int*) address;\n                unsigned long long int old = *address_as_ull, assumed;\n                do {\n                    assumed = old;\n                    old = ::atomicCAS(address_as_ull, assumed,\n                        __double_as_longlong(val + __longlong_as_double(assumed)));\n        
        } while (assumed != old);\n                return __longlong_as_double(old);\n            #else\n                (void) address;\n                (void) val;\n                return 0.0;\n            #endif\n            }\n\n            static __device__ __forceinline__ int atomicMin(int* address, int val)\n            {\n                return ::atomicMin(address, val);\n            }\n            static __device__ __forceinline__ float atomicMin(float* address, float val)\n            {\n            #if __CUDA_ARCH__ >= 120\n                int* address_as_i = (int*) address;\n                int old = *address_as_i, assumed;\n                do {\n                    assumed = old;\n                    old = ::atomicCAS(address_as_i, assumed,\n                        __float_as_int(::fminf(val, __int_as_float(assumed))));\n                } while (assumed != old);\n                return __int_as_float(old);\n            #else\n                (void) address;\n                (void) val;\n                return 0.0f;\n            #endif\n            }\n            static __device__ __forceinline__ double atomicMin(double* address, double val)\n            {\n            #if __CUDA_ARCH__ >= 130\n                unsigned long long int* address_as_ull = (unsigned long long int*) address;\n                unsigned long long int old = *address_as_ull, assumed;\n                do {\n                    assumed = old;\n                    old = ::atomicCAS(address_as_ull, assumed,\n                        __double_as_longlong(::fmin(val, __longlong_as_double(assumed))));\n                } while (assumed != old);\n                return __longlong_as_double(old);\n            #else\n                (void) address;\n                (void) val;\n                return 0.0;\n            #endif\n            }\n\n            static __device__ __forceinline__ int atomicMax(int* address, int val)\n            {\n                return ::atomicMax(address, val);\n            }\n            static __device__ __forceinline__ float atomicMax(float* address, float val)\n            {\n            #if __CUDA_ARCH__ >= 120\n                int* address_as_i = (int*) address;\n                int old = *address_as_i, assumed;\n                do {\n                    assumed = old;\n                    old = ::atomicCAS(address_as_i, assumed,\n                        __float_as_int(::fmaxf(val, __int_as_float(assumed))));\n                } while (assumed != old);\n                return __int_as_float(old);\n            #else\n                (void) address;\n                (void) val;\n                return 0.0f;\n            #endif\n            }\n            static __device__ __forceinline__ double atomicMax(double* address, double val)\n            {\n            #if __CUDA_ARCH__ >= 130\n                unsigned long long int* address_as_ull = (unsigned long long int*) address;\n                unsigned long long int old = *address_as_ull, assumed;\n                do {\n                    assumed = old;\n                    old = ::atomicCAS(address_as_ull, assumed,\n                        __double_as_longlong(::fmax(val, __longlong_as_double(assumed))));\n                } while (assumed != old);\n                return __longlong_as_double(old);\n            #else\n                (void) address;\n                (void) val;\n                return 0.0;\n            #endif\n            }\n        };\n    }; //struct Emulation\n}}} // namespace cv { namespace cuda { namespace 
device\n\n//! @endcond\n\n#endif /* OPENCV_CUDA_EMULATION_HPP_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/filters.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_FILTERS_HPP__\n#define __OPENCV_CUDA_FILTERS_HPP__\n\n#include \"saturate_cast.hpp\"\n#include \"vec_traits.hpp\"\n#include \"vec_math.hpp\"\n#include \"type_traits.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template <typename Ptr2D> struct PointFilter\n    {\n        typedef typename Ptr2D::elem_type elem_type;\n        typedef float index_type;\n\n        explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)\n        : src(src_)\n        {\n            (void)fx;\n            (void)fy;\n        }\n\n        __device__ __forceinline__ elem_type operator ()(float y, float x) const\n        {\n            return src(__float2int_rz(y), __float2int_rz(x));\n        }\n\n        Ptr2D src;\n    };\n\n    template <typename Ptr2D> struct LinearFilter\n    {\n        typedef typename Ptr2D::elem_type elem_type;\n        typedef float index_type;\n\n        explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)\n        : src(src_)\n        {\n            (void)fx;\n            (void)fy;\n        }\n        __device__ __forceinline__ elem_type operator ()(float y, float x) const\n        {\n            typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;\n\n            work_type out = VecTraits<work_type>::all(0);\n\n            const int x1 = __float2int_rd(x);\n            const int y1 = __float2int_rd(y);\n            const int x2 = x1 + 1;\n            const int y2 = y1 + 1;\n\n            elem_type src_reg = src(y1, x1);\n            out = out + src_reg * ((x2 - x) * (y2 - y));\n\n            src_reg = src(y1, x2);\n            out = out + src_reg * ((x - x1) * (y2 - y));\n\n            src_reg = src(y2, x1);\n            out = out + src_reg * ((x2 - x) * (y - y1));\n\n            src_reg = src(y2, x2);\n            out = out + src_reg * ((x - x1) * (y - y1));\n\n            return saturate_cast<elem_type>(out);\n        }\n\n        Ptr2D src;\n    };\n\n    template <typename Ptr2D> struct CubicFilter\n    {\n        typedef typename Ptr2D::elem_type elem_type;\n        typedef float index_type;\n        typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;\n\n        explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)\n        : src(src_)\n        {\n            (void)fx;\n            (void)fy;\n        }\n\n        static __device__ __forceinline__ float bicubicCoeff(float x_)\n        {\n            float x = fabsf(x_);\n            if (x <= 1.0f)\n            {\n                return x * x * (1.5f * x - 2.5f) + 1.0f;\n            }\n            else if (x < 2.0f)\n            {\n                return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;\n            }\n            else\n            {\n                return 0.0f;\n            }\n        }\n\n        __device__ elem_type operator ()(float y, float x) const\n        {\n            const float xmin = ::ceilf(x - 2.0f);\n            const float xmax = ::floorf(x + 2.0f);\n\n            const float ymin = ::ceilf(y - 2.0f);\n            const float ymax = ::floorf(y + 2.0f);\n\n            work_type sum = VecTraits<work_type>::all(0);\n            float wsum = 0.0f;\n\n            for (float cy = ymin; cy <= ymax; cy += 1.0f)\n            {\n                for (float cx = xmin; cx <= xmax; cx += 1.0f)\n                {\n                    const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy);\n                    sum = sum + w * src(__float2int_rd(cy), __float2int_rd(cx));\n                    wsum += w;\n                }\n            }\n\n    
        work_type res = (!wsum)? VecTraits<work_type>::all(0) : sum / wsum;\n\n            return saturate_cast<elem_type>(res);\n        }\n\n        Ptr2D src;\n    };\n    // for integer scaling\n    template <typename Ptr2D> struct IntegerAreaFilter\n    {\n        typedef typename Ptr2D::elem_type elem_type;\n        typedef float index_type;\n\n        explicit __host__ __device__ __forceinline__ IntegerAreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)\n            : src(src_), scale_x(scale_x_), scale_y(scale_y_), scale(1.f / (scale_x * scale_y)) {}\n\n        __device__ __forceinline__ elem_type operator ()(float y, float x) const\n        {\n            float fsx1 = x * scale_x;\n            float fsx2 = fsx1 + scale_x;\n\n            int sx1 = __float2int_ru(fsx1);\n            int sx2 = __float2int_rd(fsx2);\n\n            float fsy1 = y * scale_y;\n            float fsy2 = fsy1 + scale_y;\n\n            int sy1 = __float2int_ru(fsy1);\n            int sy2 = __float2int_rd(fsy2);\n\n            typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;\n            work_type out = VecTraits<work_type>::all(0.f);\n\n            for(int dy = sy1; dy < sy2; ++dy)\n                for(int dx = sx1; dx < sx2; ++dx)\n                {\n                    out = out + src(dy, dx) * scale;\n                }\n\n            return saturate_cast<elem_type>(out);\n        }\n\n        Ptr2D src;\n        float scale_x, scale_y ,scale;\n    };\n\n    template <typename Ptr2D> struct AreaFilter\n    {\n        typedef typename Ptr2D::elem_type elem_type;\n        typedef float index_type;\n\n        explicit __host__ __device__ __forceinline__ AreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)\n            : src(src_), scale_x(scale_x_), scale_y(scale_y_){}\n\n        __device__ __forceinline__ elem_type operator ()(float y, float x) const\n        {\n            float fsx1 = x * scale_x;\n            float fsx2 = fsx1 + scale_x;\n\n            int sx1 = __float2int_ru(fsx1);\n            int sx2 = __float2int_rd(fsx2);\n\n            float fsy1 = y * scale_y;\n            float fsy2 = fsy1 + scale_y;\n\n            int sy1 = __float2int_ru(fsy1);\n            int sy2 = __float2int_rd(fsy2);\n\n            float scale = 1.f / (fminf(scale_x, src.width - fsx1) * fminf(scale_y, src.height - fsy1));\n\n            typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;\n            work_type out = VecTraits<work_type>::all(0.f);\n\n            for (int dy = sy1; dy < sy2; ++dy)\n            {\n                for (int dx = sx1; dx < sx2; ++dx)\n                    out = out + src(dy, dx) * scale;\n\n                if (sx1 > fsx1)\n                    out = out + src(dy, (sx1 -1) ) * ((sx1 - fsx1) * scale);\n\n                if (sx2 < fsx2)\n                    out = out + src(dy, sx2) * ((fsx2 -sx2) * scale);\n            }\n\n            if (sy1 > fsy1)\n                for (int dx = sx1; dx < sx2; ++dx)\n                    out = out + src( (sy1 - 1) , dx) * ((sy1 -fsy1) * scale);\n\n            if (sy2 < fsy2)\n                for (int dx = sx1; dx < sx2; ++dx)\n                    out = out + src(sy2, dx) * ((fsy2 -sy2) * scale);\n\n            if ((sy1 > fsy1) &&  (sx1 > fsx1))\n                out = out + src( (sy1 - 1) , (sx1 - 1)) * ((sy1 -fsy1) * (sx1 -fsx1) * scale);\n\n            if ((sy1 > fsy1) &&  (sx2 < fsx2))\n                out = out + src( (sy1 - 1) , sx2) * ((sy1 -fsy1) * (fsx2 -sx2) * scale);\n\n           
 if ((sy2 < fsy2) &&  (sx2 < fsx2))\n                out = out + src(sy2, sx2) * ((fsy2 -sy2) * (fsx2 -sx2) * scale);\n\n            if ((sy2 < fsy2) &&  (sx1 > fsx1))\n                out = out + src(sy2, (sx1 - 1)) * ((fsy2 -sy2) * (sx1 -fsx1) * scale);\n\n            return saturate_cast<elem_type>(out);\n        }\n\n        Ptr2D src;\n        float scale_x, scale_y;\n        int width, height;\n    };\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_FILTERS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/funcattrib.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP_\n#define __OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP_\n\n#include <cstdio>\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template<class Func>\n    void printFuncAttrib(Func& func)\n    {\n\n        cudaFuncAttributes attrs;\n        cudaFuncGetAttributes(&attrs, func);\n\n        printf(\"=== Function stats ===\\n\");\n        printf(\"Name: \\n\");\n        printf(\"sharedSizeBytes    = %d\\n\", attrs.sharedSizeBytes);\n        printf(\"constSizeBytes     = %d\\n\", attrs.constSizeBytes);\n        printf(\"localSizeBytes     = %d\\n\", attrs.localSizeBytes);\n        printf(\"maxThreadsPerBlock = %d\\n\", attrs.maxThreadsPerBlock);\n        printf(\"numRegs            = %d\\n\", attrs.numRegs);\n        printf(\"ptxVersion         = %d\\n\", attrs.ptxVersion);\n        printf(\"binaryVersion      = %d\\n\", attrs.binaryVersion);\n        printf(\"\\n\");\n        fflush(stdout);\n    }\n}}} // namespace cv { namespace cuda { namespace cudev\n\n//! @endcond\n\n#endif  /* __OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/functional.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_FUNCTIONAL_HPP__\n#define __OPENCV_CUDA_FUNCTIONAL_HPP__\n\n#include <functional>\n#include \"saturate_cast.hpp\"\n#include \"vec_traits.hpp\"\n#include \"type_traits.hpp\"\n#include \"device_functions.h\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    // Function Objects\n    template<typename Argument, typename Result> struct unary_function : public std::unary_function<Argument, Result> {};\n    template<typename Argument1, typename Argument2, typename Result> struct binary_function : public std::binary_function<Argument1, Argument2, Result> {};\n\n    // Arithmetic Operations\n    template <typename T> struct plus : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a + b;\n        }\n        __host__ __device__ __forceinline__ plus() {}\n        __host__ __device__ __forceinline__ plus(const plus&) {}\n    };\n\n    template <typename T> struct minus : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a - b;\n        }\n        __host__ __device__ __forceinline__ minus() {}\n        __host__ __device__ __forceinline__ minus(const minus&) {}\n    };\n\n    template <typename T> struct multiplies : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a * b;\n        }\n        __host__ __device__ __forceinline__ multiplies() {}\n        __host__ __device__ __forceinline__ multiplies(const multiplies&) {}\n    };\n\n    template <typename T> struct divides : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a / b;\n        }\n        __host__ __device__ __forceinline__ divides() {}\n        __host__ __device__ __forceinline__ divides(const divides&) {}\n    };\n\n    template <typename T> struct modulus : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a % b;\n        }\n        __host__ __device__ __forceinline__ modulus() {}\n        __host__ __device__ __forceinline__ modulus(const modulus&) {}\n    };\n\n    template <typename T> struct negate : unary_function<T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a) const\n        {\n            return -a;\n        }\n        __host__ __device__ __forceinline__ negate() {}\n        __host__ __device__ __forceinline__ negate(const negate&) {}\n    };\n\n    // Comparison Operations\n    template <typename T> struct equal_to : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a == b;\n        }\n        __host__ __device__ __forceinline__ equal_to() {}\n        __host__ __device__ __forceinline__ equal_to(const equal_to&) {}\n    };\n\n    template <typename T> struct 
not_equal_to : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a != b;\n        }\n        __host__ __device__ __forceinline__ not_equal_to() {}\n        __host__ __device__ __forceinline__ not_equal_to(const not_equal_to&) {}\n    };\n\n    template <typename T> struct greater : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a > b;\n        }\n        __host__ __device__ __forceinline__ greater() {}\n        __host__ __device__ __forceinline__ greater(const greater&) {}\n    };\n\n    template <typename T> struct less : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a < b;\n        }\n        __host__ __device__ __forceinline__ less() {}\n        __host__ __device__ __forceinline__ less(const less&) {}\n    };\n\n    template <typename T> struct greater_equal : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a >= b;\n        }\n        __host__ __device__ __forceinline__ greater_equal() {}\n        __host__ __device__ __forceinline__ greater_equal(const greater_equal&) {}\n    };\n\n    template <typename T> struct less_equal : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a <= b;\n        }\n        __host__ __device__ __forceinline__ less_equal() {}\n        __host__ __device__ __forceinline__ less_equal(const less_equal&) {}\n    };\n\n    // Logical Operations\n    template <typename T> struct logical_and : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a && b;\n        }\n        __host__ __device__ __forceinline__ logical_and() {}\n        __host__ __device__ __forceinline__ logical_and(const logical_and&) {}\n    };\n\n    template <typename T> struct logical_or : binary_function<T, T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\n                                                    typename TypeTraits<T>::ParameterType b) const\n        {\n            return a || b;\n        }\n        __host__ __device__ __forceinline__ logical_or() {}\n        __host__ __device__ __forceinline__ logical_or(const logical_or&) {}\n    };\n\n    template <typename T> struct logical_not : unary_function<T, bool>\n    {\n        __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a) const\n        {\n            return !a;\n        }\n        __host__ 
__device__ __forceinline__ logical_not() {}\n        __host__ __device__ __forceinline__ logical_not(const logical_not&) {}\n    };\n\n    // Bitwise Operations\n    template <typename T> struct bit_and : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a & b;\n        }\n        __host__ __device__ __forceinline__ bit_and() {}\n        __host__ __device__ __forceinline__ bit_and(const bit_and&) {}\n    };\n\n    template <typename T> struct bit_or : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a | b;\n        }\n        __host__ __device__ __forceinline__ bit_or() {}\n        __host__ __device__ __forceinline__ bit_or(const bit_or&) {}\n    };\n\n    template <typename T> struct bit_xor : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\n                                                 typename TypeTraits<T>::ParameterType b) const\n        {\n            return a ^ b;\n        }\n        __host__ __device__ __forceinline__ bit_xor() {}\n        __host__ __device__ __forceinline__ bit_xor(const bit_xor&) {}\n    };\n\n    template <typename T> struct bit_not : unary_function<T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType v) const\n        {\n            return ~v;\n        }\n        __host__ __device__ __forceinline__ bit_not() {}\n        __host__ __device__ __forceinline__ bit_not(const bit_not&) {}\n    };\n\n    // Generalized Identity Operations\n    template <typename T> struct identity : unary_function<T, T>\n    {\n        __device__ __forceinline__ typename TypeTraits<T>::ParameterType operator()(typename TypeTraits<T>::ParameterType x) const\n        {\n            return x;\n        }\n        __host__ __device__ __forceinline__ identity() {}\n        __host__ __device__ __forceinline__ identity(const identity&) {}\n    };\n\n    template <typename T1, typename T2> struct project1st : binary_function<T1, T2, T1>\n    {\n        __device__ __forceinline__ typename TypeTraits<T1>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const\n        {\n            return lhs;\n        }\n        __host__ __device__ __forceinline__ project1st() {}\n        __host__ __device__ __forceinline__ project1st(const project1st&) {}\n    };\n\n    template <typename T1, typename T2> struct project2nd : binary_function<T1, T2, T2>\n    {\n        __device__ __forceinline__ typename TypeTraits<T2>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const\n        {\n            return rhs;\n        }\n        __host__ __device__ __forceinline__ project2nd() {}\n        __host__ __device__ __forceinline__ project2nd(const project2nd&) {}\n    };\n\n    // Min/Max Operations\n\n#define OPENCV_CUDA_IMPLEMENT_MINMAX(name, type, op) \\\n    template <> struct name<type> : binary_function<type, type, type> \\\n    { \\\n        __device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \\\n        __host__ __device__ 
__forceinline__ name() {}\\\n        __host__ __device__ __forceinline__ name(const name&) {}\\\n    };\n\n    template <typename T> struct maximum : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const\n        {\n            return max(lhs, rhs);\n        }\n        __host__ __device__ __forceinline__ maximum() {}\n        __host__ __device__ __forceinline__ maximum(const maximum&) {}\n    };\n\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uchar, ::max)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, schar, ::max)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, char, ::max)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, ushort, ::max)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, short, ::max)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, int, ::max)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uint, ::max)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, float, ::fmax)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, double, ::fmax)\n\n    template <typename T> struct minimum : binary_function<T, T, T>\n    {\n        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const\n        {\n            return min(lhs, rhs);\n        }\n        __host__ __device__ __forceinline__ minimum() {}\n        __host__ __device__ __forceinline__ minimum(const minimum&) {}\n    };\n\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uchar, ::min)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, schar, ::min)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, char, ::min)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, ushort, ::min)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, short, ::min)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, int, ::min)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uint, ::min)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, float, ::fmin)\n    OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, double, ::fmin)\n\n#undef OPENCV_CUDA_IMPLEMENT_MINMAX\n\n    // Math functions\n\n    template <typename T> struct abs_func : unary_function<T, T>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType x) const\n        {\n            return abs(x);\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<unsigned char> : unary_function<unsigned char, unsigned char>\n    {\n        __device__ __forceinline__ unsigned char operator ()(unsigned char x) const\n        {\n            return x;\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<signed char> : unary_function<signed char, signed char>\n    {\n        __device__ __forceinline__ signed char operator ()(signed char x) const\n        {\n            return ::abs((int)x);\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<char> : unary_function<char, char>\n    {\n        __device__ __forceinline__ char operator ()(char x) const\n        {\n            return ::abs((int)x);\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<unsigned short> : 
unary_function<unsigned short, unsigned short>\n    {\n        __device__ __forceinline__ unsigned short operator ()(unsigned short x) const\n        {\n            return x;\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<short> : unary_function<short, short>\n    {\n        __device__ __forceinline__ short operator ()(short x) const\n        {\n            return ::abs((int)x);\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<unsigned int> : unary_function<unsigned int, unsigned int>\n    {\n        __device__ __forceinline__ unsigned int operator ()(unsigned int x) const\n        {\n            return x;\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<int> : unary_function<int, int>\n    {\n        __device__ __forceinline__ int operator ()(int x) const\n        {\n            return ::abs(x);\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<float> : unary_function<float, float>\n    {\n        __device__ __forceinline__ float operator ()(float x) const\n        {\n            return ::fabsf(x);\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n    template <> struct abs_func<double> : unary_function<double, double>\n    {\n        __device__ __forceinline__ double operator ()(double x) const\n        {\n            return ::fabs(x);\n        }\n\n        __host__ __device__ __forceinline__ abs_func() {}\n        __host__ __device__ __forceinline__ abs_func(const abs_func&) {}\n    };\n\n#define OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(name, func) \\\n    template <typename T> struct name ## _func : unary_function<T, float> \\\n    { \\\n        __device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v) const \\\n        { \\\n            return func ## f(v); \\\n        } \\\n        __host__ __device__ __forceinline__ name ## _func() {} \\\n        __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \\\n    }; \\\n    template <> struct name ## _func<double> : unary_function<double, double> \\\n    { \\\n        __device__ __forceinline__ double operator ()(double v) const \\\n        { \\\n            return func(v); \\\n        } \\\n        __host__ __device__ __forceinline__ name ## _func() {} \\\n        __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \\\n    };\n\n#define OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(name, func) \\\n    template <typename T> struct name ## _func : binary_function<T, T, float> \\\n    { \\\n        __device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v1, typename TypeTraits<T>::ParameterType v2) const \\\n        { \\\n            return func ## f(v1, v2); \\\n        } \\\n        __host__ __device__ __forceinline__ name ## _func() {} \\\n        __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \\\n    }; \\\n    template <> struct name ## _func<double> : binary_function<double, double, 
double> \\\n    { \\\n        __device__ __forceinline__ double operator ()(double v1, double v2) const \\\n        { \\\n            return func(v1, v2); \\\n        } \\\n        __host__ __device__ __forceinline__ name ## _func() {} \\\n        __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \\\n    };\n\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp, ::exp)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log, ::log)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log2, ::log2)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log10, ::log10)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sin, ::sin)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cos, ::cos)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tan, ::tan)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asin, ::asin)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acos, ::acos)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atan, ::atan)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh)\n    OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh)\n\n    OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot)\n    OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2)\n    OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(pow, ::pow)\n\n    #undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR\n    #undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE\n    #undef OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR\n\n    template<typename T> struct hypot_sqr_func : binary_function<T, T, float>\n    {\n        __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType src1, typename TypeTraits<T>::ParameterType src2) const\n        {\n            return src1 * src1 + src2 * src2;\n        }\n        __host__ __device__ __forceinline__ hypot_sqr_func() {}\n        __host__ __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func&) {}\n    };\n\n    // Saturate Cast Functor\n    template <typename T, typename D> struct saturate_cast_func : unary_function<T, D>\n    {\n        __device__ __forceinline__ D operator ()(typename TypeTraits<T>::ParameterType v) const\n        {\n            return saturate_cast<D>(v);\n        }\n        __host__ __device__ __forceinline__ saturate_cast_func() {}\n        __host__ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func&) {}\n    };\n\n    // Threshold Functors\n    template <typename T> struct thresh_binary_func : unary_function<T, T>\n    {\n        __host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}\n\n        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\n        {\n            return (src > thresh) * maxVal;\n        }\n\n        __host__ __device__ __forceinline__ thresh_binary_func() {}\n        __host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)\n            : thresh(other.thresh), maxVal(other.maxVal) {}\n\n        T thresh;\n        T maxVal;\n    };\n\n    template <typename T> struct thresh_binary_inv_func : unary_function<T, T>\n    {\n        __host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}\n\n        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\n    
    {\n            return (src <= thresh) * maxVal;\n        }\n\n        __host__ __device__ __forceinline__ thresh_binary_inv_func() {}\n        __host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)\n            : thresh(other.thresh), maxVal(other.maxVal) {}\n\n        T thresh;\n        T maxVal;\n    };\n\n    template <typename T> struct thresh_trunc_func : unary_function<T, T>\n    {\n        explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}\n\n        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\n        {\n            return minimum<T>()(src, thresh);\n        }\n\n        __host__ __device__ __forceinline__ thresh_trunc_func() {}\n        __host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)\n            : thresh(other.thresh) {}\n\n        T thresh;\n    };\n\n    template <typename T> struct thresh_to_zero_func : unary_function<T, T>\n    {\n        explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}\n\n        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\n        {\n            return (src > thresh) * src;\n        }\n\n        __host__ __device__ __forceinline__ thresh_to_zero_func() {}\n       __host__  __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)\n            : thresh(other.thresh) {}\n\n        T thresh;\n    };\n\n    template <typename T> struct thresh_to_zero_inv_func : unary_function<T, T>\n    {\n        explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}\n\n        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\n        {\n            return (src <= thresh) * src;\n        }\n\n        __host__ __device__ __forceinline__ thresh_to_zero_inv_func() {}\n        __host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)\n            : thresh(other.thresh) {}\n\n        T thresh;\n    };\n\n    // Function Object Adaptors\n    template <typename Predicate> struct unary_negate : unary_function<typename Predicate::argument_type, bool>\n    {\n      explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {}\n\n      __device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::argument_type>::ParameterType x) const\n      {\n          return !pred(x);\n      }\n\n      __host__ __device__ __forceinline__ unary_negate() {}\n      __host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {}\n\n      Predicate pred;\n    };\n\n    template <typename Predicate> __host__ __device__ __forceinline__ unary_negate<Predicate> not1(const Predicate& pred)\n    {\n        return unary_negate<Predicate>(pred);\n    }\n\n    template <typename Predicate> struct binary_negate : binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>\n    {\n        explicit __host__ __device__ __forceinline__ binary_negate(const Predicate& p) : pred(p) {}\n\n        __device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::first_argument_type>::ParameterType x,\n                                                   typename TypeTraits<typename 
Predicate::second_argument_type>::ParameterType y) const\n        {\n            return !pred(x,y);\n        }\n\n        __host__ __device__ __forceinline__ binary_negate() {}\n        __host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {}\n\n        Predicate pred;\n    };\n\n    template <typename BinaryPredicate> __host__ __device__ __forceinline__ binary_negate<BinaryPredicate> not2(const BinaryPredicate& pred)\n    {\n        return binary_negate<BinaryPredicate>(pred);\n    }\n\n    template <typename Op> struct binder1st : unary_function<typename Op::second_argument_type, typename Op::result_type>\n    {\n        __host__ __device__ __forceinline__ binder1st(const Op& op_, const typename Op::first_argument_type& arg1_) : op(op_), arg1(arg1_) {}\n\n        __device__ __forceinline__ typename Op::result_type operator ()(typename TypeTraits<typename Op::second_argument_type>::ParameterType a) const\n        {\n            return op(arg1, a);\n        }\n\n        __host__ __device__ __forceinline__ binder1st() {}\n        __host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {}\n\n        Op op;\n        typename Op::first_argument_type arg1;\n    };\n\n    template <typename Op, typename T> __host__ __device__ __forceinline__ binder1st<Op> bind1st(const Op& op, const T& x)\n    {\n        return binder1st<Op>(op, typename Op::first_argument_type(x));\n    }\n\n    template <typename Op> struct binder2nd : unary_function<typename Op::first_argument_type, typename Op::result_type>\n    {\n        __host__ __device__ __forceinline__ binder2nd(const Op& op_, const typename Op::second_argument_type& arg2_) : op(op_), arg2(arg2_) {}\n\n        __forceinline__ __device__ typename Op::result_type operator ()(typename TypeTraits<typename Op::first_argument_type>::ParameterType a) const\n        {\n            return op(a, arg2);\n        }\n\n        __host__ __device__ __forceinline__ binder2nd() {}\n        __host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {}\n\n        Op op;\n        typename Op::second_argument_type arg2;\n    };\n\n    template <typename Op, typename T> __host__ __device__ __forceinline__ binder2nd<Op> bind2nd(const Op& op, const T& x)\n    {\n        return binder2nd<Op>(op, typename Op::second_argument_type(x));\n    }\n\n    // Functor Traits\n    template <typename F> struct IsUnaryFunction\n    {\n        typedef char Yes;\n        struct No {Yes a[2];};\n\n        template <typename T, typename D> static Yes check(unary_function<T, D>);\n        static No check(...);\n\n        static F makeF();\n\n        enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };\n    };\n\n    template <typename F> struct IsBinaryFunction\n    {\n        typedef char Yes;\n        struct No {Yes a[2];};\n\n        template <typename T1, typename T2, typename D> static Yes check(binary_function<T1, T2, D>);\n        static No check(...);\n\n        static F makeF();\n\n        enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };\n    };\n\n    namespace functional_detail\n    {\n        template <size_t src_elem_size, size_t dst_elem_size> struct UnOpShift { enum { shift = 1 }; };\n        template <size_t src_elem_size> struct UnOpShift<src_elem_size, 1> { enum { shift = 4 }; };\n        template <size_t src_elem_size> struct UnOpShift<src_elem_size, 2> { enum { shift = 2 }; };\n\n        template <typename T, typename D> struct 
DefaultUnaryShift\n        {\n            enum { shift = UnOpShift<sizeof(T), sizeof(D)>::shift };\n        };\n\n        template <size_t src_elem_size1, size_t src_elem_size2, size_t dst_elem_size> struct BinOpShift { enum { shift = 1 }; };\n        template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 1> { enum { shift = 4 }; };\n        template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 2> { enum { shift = 2 }; };\n\n        template <typename T1, typename T2, typename D> struct DefaultBinaryShift\n        {\n            enum { shift = BinOpShift<sizeof(T1), sizeof(T2), sizeof(D)>::shift };\n        };\n\n        template <typename Func, bool unary = IsUnaryFunction<Func>::value> struct ShiftDispatcher;\n        template <typename Func> struct ShiftDispatcher<Func, true>\n        {\n            enum { shift = DefaultUnaryShift<typename Func::argument_type, typename Func::result_type>::shift };\n        };\n        template <typename Func> struct ShiftDispatcher<Func, false>\n        {\n            enum { shift = DefaultBinaryShift<typename Func::first_argument_type, typename Func::second_argument_type, typename Func::result_type>::shift };\n        };\n    }\n\n    template <typename Func> struct DefaultTransformShift\n    {\n        enum { shift = functional_detail::ShiftDispatcher<Func>::shift };\n    };\n\n    template <typename Func> struct DefaultTransformFunctorTraits\n    {\n        enum { simple_block_dim_x = 16 };\n        enum { simple_block_dim_y = 16 };\n\n        enum { smart_block_dim_x = 16 };\n        enum { smart_block_dim_y = 16 };\n        enum { smart_shift = DefaultTransformShift<Func>::shift };\n    };\n\n    template <typename Func> struct TransformFunctorTraits : DefaultTransformFunctorTraits<Func> {};\n\n#define OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(type) \\\n    template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type >\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_FUNCTIONAL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/limits.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_LIMITS_HPP__\n#define __OPENCV_CUDA_LIMITS_HPP__\n\n#include <limits.h>\n#include <float.h>\n#include \"common.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\ntemplate <class T> struct numeric_limits;\n\ntemplate <> struct numeric_limits<bool>\n{\n    __device__ __forceinline__ static bool min() { return false; }\n    __device__ __forceinline__ static bool max() { return true;  }\n    static const bool is_signed = false;\n};\n\ntemplate <> struct numeric_limits<signed char>\n{\n    __device__ __forceinline__ static signed char min() { return SCHAR_MIN; }\n    __device__ __forceinline__ static signed char max() { return SCHAR_MAX; }\n    static const bool is_signed = true;\n};\n\ntemplate <> struct numeric_limits<unsigned char>\n{\n    __device__ __forceinline__ static unsigned char min() { return 0; }\n    __device__ __forceinline__ static unsigned char max() { return UCHAR_MAX; }\n    static const bool is_signed = false;\n};\n\ntemplate <> struct numeric_limits<short>\n{\n    __device__ __forceinline__ static short min() { return SHRT_MIN; }\n    __device__ __forceinline__ static short max() { return SHRT_MAX; }\n    static const bool is_signed = true;\n};\n\ntemplate <> struct numeric_limits<unsigned short>\n{\n    __device__ __forceinline__ static unsigned short min() { return 0; }\n    __device__ __forceinline__ static unsigned short max() { return USHRT_MAX; }\n    static const bool is_signed = false;\n};\n\ntemplate <> struct numeric_limits<int>\n{\n    __device__ __forceinline__ static int min() { return INT_MIN; }\n    __device__ __forceinline__ static int max() { return INT_MAX; }\n    static const bool is_signed = true;\n};\n\ntemplate <> struct numeric_limits<unsigned int>\n{\n    __device__ __forceinline__ static unsigned int min() { return 0; }\n    __device__ __forceinline__ static unsigned int max() { return UINT_MAX; }\n    static const bool is_signed = false;\n};\n\ntemplate <> struct numeric_limits<float>\n{\n    __device__ __forceinline__ static float min() { return FLT_MIN; }\n    __device__ __forceinline__ static float max() { return FLT_MAX; }\n    __device__ __forceinline__ static float epsilon() { return FLT_EPSILON; }\n    static const bool is_signed = true;\n};\n\ntemplate <> struct numeric_limits<double>\n{\n    __device__ __forceinline__ static double min() { return DBL_MIN; }\n    __device__ __forceinline__ static double max() { return DBL_MAX; }\n    __device__ __forceinline__ static double epsilon() { return DBL_EPSILON; }\n    static const bool is_signed = true;\n};\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_LIMITS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/reduce.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_REDUCE_HPP__\n#define __OPENCV_CUDA_REDUCE_HPP__\n\n#include <thrust/tuple.h>\n#include \"detail/reduce.hpp\"\n#include \"detail/reduce_key_val.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template <int N, typename T, class Op>\n    __device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op)\n    {\n        reduce_detail::Dispatcher<N>::reductor::template reduce<volatile T*, T&, const Op&>(smem, val, tid, op);\n    }\n    template <int N,\n              typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,\n              typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,\n              class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>\n    __device__ __forceinline__ void reduce(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,\n                                           const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,\n                                           unsigned int tid,\n                                           const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)\n    {\n        reduce_detail::Dispatcher<N>::reductor::template reduce<\n                const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>&,\n                const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>&,\n                const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>&>(smem, val, tid, op);\n    }\n\n    template <unsigned int N, typename K, typename V, class Cmp>\n    __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, volatile V* svals, V& val, unsigned int tid, const Cmp& cmp)\n    {\n        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&, volatile V*, V&, const Cmp&>(skeys, key, svals, val, tid, cmp);\n    }\n    template <unsigned int N,\n              typename K,\n              typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,\n              typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,\n              class Cmp>\n    __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key,\n                                                 const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,\n                                                 const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,\n                                                 unsigned int tid, const Cmp& cmp)\n    {\n        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&,\n                const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,\n                const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,\n                const Cmp&>(skeys, key, svals, val, tid, cmp);\n    }\n    template <unsigned int N,\n              typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,\n              typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,\n              typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename 
VP9,\n              typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,\n              class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>\n    __device__ __forceinline__ void reduceKeyVal(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,\n                                                 const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,\n                                                 const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,\n                                                 const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,\n                                                 unsigned int tid,\n                                                 const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp)\n    {\n        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<\n                const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>&,\n                const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>&,\n                const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,\n                const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,\n                const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>&\n                >(skeys, key, svals, val, tid, cmp);\n    }\n\n    // smem_tuple\n\n    template <typename T0>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*>\n    smem_tuple(T0* t0)\n    {\n        return thrust::make_tuple((volatile T0*) t0);\n    }\n\n    template <typename T0, typename T1>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*>\n    smem_tuple(T0* t0, T1* t1)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1);\n    }\n\n    template <typename T0, typename T1, typename T2>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*>\n    smem_tuple(T0* t0, T1* t1, T2* t2)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2);\n    }\n\n    template <typename T0, typename T1, typename T2, typename T3>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*>\n    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3);\n    }\n\n    template <typename T0, typename T1, typename T2, typename T3, typename T4>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*>\n    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4);\n    }\n\n    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*>\n    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) 
t5);\n    }\n\n    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*>\n    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6);\n    }\n\n    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*>\n    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7);\n    }\n\n    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*>\n    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8);\n    }\n\n    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>\n    __device__ __forceinline__\n    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*, volatile T9*>\n    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8, T9* t9)\n    {\n        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8, (volatile T9*) t9);\n    }\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_REDUCE_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/saturate_cast.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_SATURATE_CAST_HPP__\n#define __OPENCV_CUDA_SATURATE_CAST_HPP__\n\n#include \"common.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); }\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); }\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); }\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); }\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); }\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); }\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); }\n    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); }\n\n    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(schar v)\n    {\n        uint res = 0;\n        int vi = v;\n        asm(\"cvt.sat.u8.s8 %0, %1;\" : \"=r\"(res) : \"r\"(vi));\n        return res;\n    }\n    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(short v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.u8.s16 %0, %1;\" : \"=r\"(res) : \"h\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(ushort v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.u8.u16 %0, %1;\" : \"=r\"(res) : \"h\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(int v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.u8.s32 %0, %1;\" : \"=r\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(uint v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.u8.u32 %0, %1;\" : \"=r\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(float v)\n    {\n        uint res = 0;\n        asm(\"cvt.rni.sat.u8.f32 %0, %1;\" : \"=r\"(res) : \"f\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(double v)\n    {\n    #if __CUDA_ARCH__ >= 130\n        uint res = 0;\n        asm(\"cvt.rni.sat.u8.f64 %0, %1;\" : \"=r\"(res) : \"d\"(v));\n        return res;\n    #else\n        return saturate_cast<uchar>((float)v);\n    #endif\n    }\n\n    template<> __device__ __forceinline__ schar saturate_cast<schar>(uchar v)\n    {\n        uint res = 0;\n        uint vi = v;\n        asm(\"cvt.sat.s8.u8 %0, %1;\" : \"=r\"(res) : \"r\"(vi));\n        return res;\n    }\n    template<> __device__ __forceinline__ schar saturate_cast<schar>(short v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.s8.s16 %0, %1;\" : \"=r\"(res) : \"h\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ schar saturate_cast<schar>(ushort v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.s8.u16 %0, %1;\" : \"=r\"(res) : \"h\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ schar saturate_cast<schar>(int v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.s8.s32 %0, %1;\" : \"=r\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ schar saturate_cast<schar>(uint v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.s8.u32 %0, %1;\" : \"=r\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ schar saturate_cast<schar>(float 
v)\n    {\n        uint res = 0;\n        asm(\"cvt.rni.sat.s8.f32 %0, %1;\" : \"=r\"(res) : \"f\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ schar saturate_cast<schar>(double v)\n    {\n    #if __CUDA_ARCH__ >= 130\n        uint res = 0;\n        asm(\"cvt.rni.sat.s8.f64 %0, %1;\" : \"=r\"(res) : \"d\"(v));\n        return res;\n    #else\n        return saturate_cast<schar>((float)v);\n    #endif\n    }\n\n    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(schar v)\n    {\n        ushort res = 0;\n        int vi = v;\n        asm(\"cvt.sat.u16.s8 %0, %1;\" : \"=h\"(res) : \"r\"(vi));\n        return res;\n    }\n    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(short v)\n    {\n        ushort res = 0;\n        asm(\"cvt.sat.u16.s16 %0, %1;\" : \"=h\"(res) : \"h\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(int v)\n    {\n        ushort res = 0;\n        asm(\"cvt.sat.u16.s32 %0, %1;\" : \"=h\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(uint v)\n    {\n        ushort res = 0;\n        asm(\"cvt.sat.u16.u32 %0, %1;\" : \"=h\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(float v)\n    {\n        ushort res = 0;\n        asm(\"cvt.rni.sat.u16.f32 %0, %1;\" : \"=h\"(res) : \"f\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(double v)\n    {\n    #if __CUDA_ARCH__ >= 130\n        ushort res = 0;\n        asm(\"cvt.rni.sat.u16.f64 %0, %1;\" : \"=h\"(res) : \"d\"(v));\n        return res;\n    #else\n        return saturate_cast<ushort>((float)v);\n    #endif\n    }\n\n    template<> __device__ __forceinline__ short saturate_cast<short>(ushort v)\n    {\n        short res = 0;\n        asm(\"cvt.sat.s16.u16 %0, %1;\" : \"=h\"(res) : \"h\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ short saturate_cast<short>(int v)\n    {\n        short res = 0;\n        asm(\"cvt.sat.s16.s32 %0, %1;\" : \"=h\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ short saturate_cast<short>(uint v)\n    {\n        short res = 0;\n        asm(\"cvt.sat.s16.u32 %0, %1;\" : \"=h\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ short saturate_cast<short>(float v)\n    {\n        short res = 0;\n        asm(\"cvt.rni.sat.s16.f32 %0, %1;\" : \"=h\"(res) : \"f\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ short saturate_cast<short>(double v)\n    {\n    #if __CUDA_ARCH__ >= 130\n        short res = 0;\n        asm(\"cvt.rni.sat.s16.f64 %0, %1;\" : \"=h\"(res) : \"d\"(v));\n        return res;\n    #else\n        return saturate_cast<short>((float)v);\n    #endif\n    }\n\n    template<> __device__ __forceinline__ int saturate_cast<int>(uint v)\n    {\n        int res = 0;\n        asm(\"cvt.sat.s32.u32 %0, %1;\" : \"=r\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ int saturate_cast<int>(float v)\n    {\n        return __float2int_rn(v);\n    }\n    template<> __device__ __forceinline__ int saturate_cast<int>(double v)\n    {\n    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\n        return __double2int_rn(v);\n    #else\n        return saturate_cast<int>((float)v);\n    #endif\n    }\n\n    
template<> __device__ __forceinline__ uint saturate_cast<uint>(schar v)\n    {\n        uint res = 0;\n        int vi = v;\n        asm(\"cvt.sat.u32.s8 %0, %1;\" : \"=r\"(res) : \"r\"(vi));\n        return res;\n    }\n    template<> __device__ __forceinline__ uint saturate_cast<uint>(short v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.u32.s16 %0, %1;\" : \"=r\"(res) : \"h\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ uint saturate_cast<uint>(int v)\n    {\n        uint res = 0;\n        asm(\"cvt.sat.u32.s32 %0, %1;\" : \"=r\"(res) : \"r\"(v));\n        return res;\n    }\n    template<> __device__ __forceinline__ uint saturate_cast<uint>(float v)\n    {\n        return __float2uint_rn(v);\n    }\n    template<> __device__ __forceinline__ uint saturate_cast<uint>(double v)\n    {\n    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\n        return __double2uint_rn(v);\n    #else\n        return saturate_cast<uint>((float)v);\n    #endif\n    }\n}}}\n\n//! @endcond\n\n#endif /* __OPENCV_CUDA_SATURATE_CAST_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/scan.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_SCAN_HPP__\n#define __OPENCV_CUDA_SCAN_HPP__\n\n#include \"opencv2/core/cuda/common.hpp\"\n#include \"opencv2/core/cuda/utility.hpp\"\n#include \"opencv2/core/cuda/warp.hpp\"\n#include \"opencv2/core/cuda/warp_shuffle.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    enum ScanKind { EXCLUSIVE = 0,  INCLUSIVE = 1 };\n\n    template <ScanKind Kind, typename T, typename F> struct WarpScan\n    {\n        __device__ __forceinline__ WarpScan() {}\n        __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }\n\n        __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)\n        {\n            const unsigned int lane = idx & 31;\n            F op;\n\n            if ( lane >=  1) ptr [idx ] = op(ptr [idx -  1], ptr [idx]);\n            if ( lane >=  2) ptr [idx ] = op(ptr [idx -  2], ptr [idx]);\n            if ( lane >=  4) ptr [idx ] = op(ptr [idx -  4], ptr [idx]);\n            if ( lane >=  8) ptr [idx ] = op(ptr [idx -  8], ptr [idx]);\n            if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);\n\n            if( Kind == INCLUSIVE )\n                return ptr [idx];\n            else\n                return (lane > 0) ? 
ptr [idx - 1] : 0;\n        }\n\n        __device__ __forceinline__ unsigned int index(const unsigned int tid)\n        {\n            return tid;\n        }\n\n        __device__ __forceinline__ void init(volatile T *ptr){}\n\n        static const int warp_offset      = 0;\n\n        typedef WarpScan<INCLUSIVE, T, F>  merge;\n    };\n\n    template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp\n    {\n        __device__ __forceinline__ WarpScanNoComp() {}\n        __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }\n\n        __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)\n        {\n            const unsigned int lane = threadIdx.x & 31;\n            F op;\n\n            ptr [idx ] = op(ptr [idx -  1], ptr [idx]);\n            ptr [idx ] = op(ptr [idx -  2], ptr [idx]);\n            ptr [idx ] = op(ptr [idx -  4], ptr [idx]);\n            ptr [idx ] = op(ptr [idx -  8], ptr [idx]);\n            ptr [idx ] = op(ptr [idx - 16], ptr [idx]);\n\n            if( Kind == INCLUSIVE )\n                return ptr [idx];\n            else\n                return (lane > 0) ? ptr [idx - 1] : 0;\n        }\n\n        __device__ __forceinline__ unsigned int index(const unsigned int tid)\n        {\n            return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);\n        }\n\n        __device__ __forceinline__ void init(volatile T *ptr)\n        {\n            ptr[threadIdx.x] = 0;\n        }\n\n        static const int warp_smem_stride = 32 + 16 + 1;\n        static const int warp_offset      = 16;\n        static const int warp_log         = 5;\n        static const int warp_mask        = 31;\n\n        typedef WarpScanNoComp<INCLUSIVE, T, F> merge;\n    };\n\n    template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan\n    {\n        __device__ __forceinline__ BlockScan() {}\n        __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }\n\n        __device__ __forceinline__ T operator()(volatile T *ptr)\n        {\n            const unsigned int tid  = threadIdx.x;\n            const unsigned int lane = tid & warp_mask;\n            const unsigned int warp = tid >> warp_log;\n\n            Sc scan;\n            typename Sc::merge merge_scan;\n            const unsigned int idx = scan.index(tid);\n\n            T val = scan(ptr, idx);\n            __syncthreads ();\n\n            if( warp == 0)\n                scan.init(ptr);\n            __syncthreads ();\n\n            if( lane == 31 )\n                ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? 
val : ptr [idx];\n            __syncthreads ();\n\n            if( warp == 0 )\n                merge_scan(ptr, idx);\n            __syncthreads();\n\n            if ( warp > 0)\n                val = ptr [scan.warp_offset + warp - 1] + val;\n            __syncthreads ();\n\n            ptr[idx] = val;\n            __syncthreads ();\n\n            return val;\n        }\n\n        static const int warp_log  = 5;\n        static const int warp_mask = 31;\n    };\n\n    template <typename T>\n    __device__ T warpScanInclusive(T idata, volatile T* s_Data, unsigned int tid)\n    {\n    #if __CUDA_ARCH__ >= 300\n        const unsigned int laneId = cv::cuda::device::Warp::laneId();\n\n        // scan using shuffle functions\n        #pragma unroll\n        for (int i = 1; i <= (OPENCV_CUDA_WARP_SIZE / 2); i *= 2)\n        {\n            const T n = cv::cuda::device::shfl_up(idata, i);\n            if (laneId >= i)\n                idata += n;\n        }\n\n        return idata;\n    #else\n        unsigned int pos = 2 * tid - (tid & (OPENCV_CUDA_WARP_SIZE - 1));\n        s_Data[pos] = 0;\n        pos += OPENCV_CUDA_WARP_SIZE;\n        s_Data[pos] = idata;\n\n        s_Data[pos] += s_Data[pos - 1];\n        s_Data[pos] += s_Data[pos - 2];\n        s_Data[pos] += s_Data[pos - 4];\n        s_Data[pos] += s_Data[pos - 8];\n        s_Data[pos] += s_Data[pos - 16];\n\n        return s_Data[pos];\n    #endif\n    }\n\n    template <typename T>\n    __device__ __forceinline__ T warpScanExclusive(T idata, volatile T* s_Data, unsigned int tid)\n    {\n        return warpScanInclusive(idata, s_Data, tid) - idata;\n    }\n\n    template <int tiNumScanThreads, typename T>\n    __device__ T blockScanInclusive(T idata, volatile T* s_Data, unsigned int tid)\n    {\n        if (tiNumScanThreads > OPENCV_CUDA_WARP_SIZE)\n        {\n            //Bottom-level inclusive warp scan\n            T warpResult = warpScanInclusive(idata, s_Data, tid);\n\n            //Save top elements of each warp for exclusive warp scan\n            //sync to wait for warp scans to complete (because s_Data is being overwritten)\n            __syncthreads();\n            if ((tid & (OPENCV_CUDA_WARP_SIZE - 1)) == (OPENCV_CUDA_WARP_SIZE - 1))\n            {\n                s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE] = warpResult;\n            }\n\n            //wait for warp scans to complete\n            __syncthreads();\n\n            if (tid < (tiNumScanThreads / OPENCV_CUDA_WARP_SIZE) )\n            {\n                //grab top warp elements\n                T val = s_Data[tid];\n                //calculate exclusive scan and write back to shared memory\n                s_Data[tid] = warpScanExclusive(val, s_Data, tid);\n            }\n\n            //return updated warp scans with exclusive scan results\n            __syncthreads();\n\n            return warpResult + s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE];\n        }\n        else\n        {\n            return warpScanInclusive(idata, s_Data, tid);\n        }\n    }\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_SCAN_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/simd_functions.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n/*\n * Copyright (c) 2013 NVIDIA Corporation. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n *   Redistributions of source code must retain the above copyright notice,\n *   this list of conditions and the following disclaimer.\n *\n *   Redistributions in binary form must reproduce the above copyright notice,\n *   this list of conditions and the following disclaimer in the documentation\n *   and/or other materials provided with the distribution.\n *\n *   Neither the name of NVIDIA Corporation nor the names of its contributors\n *   may be used to endorse or promote products derived from this software\n *   without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef __OPENCV_CUDA_SIMD_FUNCTIONS_HPP__\n#define __OPENCV_CUDA_SIMD_FUNCTIONS_HPP__\n\n#include \"common.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    // 2\n\n    static __device__ __forceinline__ unsigned int vadd2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vadd2.u32.u32.u32.sat %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vadd.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vadd.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s;\n        s = a ^ b;          // sum bits\n        r = a + b;          // actual sum\n        s = s ^ r;          // determine carry-ins for each bit position\n        s = s & 0x00010000; // carry-in to high word (= carry-out from low word)\n        r = r - s;          // subtract out carry-out from low word\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsub2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vsub2.u32.u32.u32.sat %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vsub.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vsub.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s;\n        s = a ^ b;          // sum bits\n        r = a - b;          // actual sum\n        s = s ^ r;          // determine carry-ins for each bit position\n        s = s & 0x00010000; // borrow to high word\n        r = r + s;          // compensate for borrow from low word\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vabsdiff2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vabsdiff2.u32.u32.u32.sat %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vabsdiff.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vabsdiff.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s, t, u, v;\n        s = a & 0x0000ffff; // extract low halfword\n        r = b & 0x0000ffff; // extract low halfword\n        u = ::max(r, s);    // maximum of low halfwords\n        v = ::min(r, s);    // minimum of low halfwords\n        s = a & 0xffff0000; // extract high halfword\n        r = b & 0xffff0000; // extract high halfword\n        t = ::max(r, s);    // maximum of high halfwords\n        s = ::min(r, s);    // 
minimum of high halfwords\n        r = u | t;          // maximum of both halfwords\n        s = v | s;          // minimum of both halfwords\n        r = r - s;          // |a - b| = max(a,b) - min(a,b);\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vavg2(unsigned int a, unsigned int b)\n    {\n        unsigned int r, s;\n\n        // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==>\n        // (a + b) / 2 = (a & b) + ((a ^ b) >> 1)\n        s = a ^ b;\n        r = a & b;\n        s = s & 0xfffefffe; // ensure shift doesn't cross halfword boundaries\n        s = s >> 1;\n        s = r + s;\n\n        return s;\n    }\n\n    static __device__ __forceinline__ unsigned int vavrg2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vavrg2.u32.u32.u32 %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==>\n        // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1)\n        unsigned int s;\n        s = a ^ b;\n        r = a | b;\n        s = s & 0xfffefffe; // ensure shift doesn't cross half-word boundaries\n        s = s >> 1;\n        r = r - s;\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vseteq2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset2.u32.u32.eq %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        unsigned int c;\n        r = a ^ b;          // 0x0000 if a == b\n        c = r | 0x80008000; // set msbs, to catch carry out\n        r = r ^ c;          // extract msbs, msb = 1 if r < 0x8000\n        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000\n        c = r & ~c;         // msb = 1, if r was 0x0000\n        r = c >> 15;        // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmpeq2(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vseteq2(a, b);\n        c = r << 16;        // convert bool\n        r = c - r;          //  into mask\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        r = a ^ b;          // 0x0000 if a == b\n        c = r | 0x80008000; // set msbs, to catch carry out\n        r = r ^ c;          // extract msbs, msb = 1 if r < 0x8000\n        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000\n        c = r & ~c;         // msb = 1, if r was 0x0000\n        r = c >> 15;        // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetge2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset2.u32.u32.ge %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(b));\n        c = vavrg2(a, b);   // (a + ~b + 1) / 2 = (a - b) / 2\n        c = c & 0x80008000; // msb = carry-outs\n        r = c >> 15;        // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ 
__forceinline__ unsigned int vcmpge2(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetge2(a, b);\n        c = r << 16;        // convert bool\n        r = c - r;          //  into mask\n    #else\n        asm(\"not.b32 %0, %0;\" : \"+r\"(b));\n        c = vavrg2(a, b);   // (a + ~b + 1) / 2 = (a - b) / 2\n        c = c & 0x80008000; // msb = carry-outs\n        r = c >> 15;        // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetgt2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset2.u32.u32.gt %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(b));\n        c = vavg2(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]\n        c = c & 0x80008000; // msbs = carry-outs\n        r = c >> 15;        // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmpgt2(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetgt2(a, b);\n        c = r << 16;        // convert bool\n        r = c - r;          //  into mask\n    #else\n        asm(\"not.b32 %0, %0;\" : \"+r\"(b));\n        c = vavg2(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]\n        c = c & 0x80008000; // msbs = carry-outs\n        r = c >> 15;        // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetle2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset2.u32.u32.le %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        c = vavrg2(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2\n        c = c & 0x80008000; // msb = carry-outs\n        r = c >> 15;        // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmple2(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetle2(a, b);\n        c = r << 16;        // convert bool\n        r = c - r;          //  into mask\n    #else\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        c = vavrg2(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2\n        c = c & 0x80008000; // msb = carry-outs\n        r = c >> 15;        // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetlt2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset2.u32.u32.lt %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        c = vavg2(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]\n        c = c & 0x80008000; // msb = carry-outs\n        r = c >> 15;        // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmplt2(unsigned int a, unsigned int b)\n    {\n  
      unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetlt2(a, b);\n        c = r << 16;        // convert bool\n        r = c - r;          //  into mask\n    #else\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        c = vavg2(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]\n        c = c & 0x80008000; // msb = carry-outs\n        r = c >> 15;        // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetne2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm (\"vset2.u32.u32.ne %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        unsigned int c;\n        r = a ^ b;          // 0x0000 if a == b\n        c = r | 0x80008000; // set msbs, to catch carry out\n        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000\n        c = r | c;          // msb = 1, if r was not 0x0000\n        c = c & 0x80008000; // extract msbs\n        r = c >> 15;        // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmpne2(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetne2(a, b);\n        c = r << 16;        // convert bool\n        r = c - r;          //  into mask\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        r = a ^ b;          // 0x0000 if a == b\n        c = r | 0x80008000; // set msbs, to catch carry out\n        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000\n        c = r | c;          // msb = 1, if r was not 0x0000\n        c = c & 0x80008000; // extract msbs\n        r = c >> 15;        // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vmax2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vmax2.u32.u32.u32 %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vmax.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vmax.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s, t, u;\n        r = a & 0x0000ffff; // extract low halfword\n        s = b & 0x0000ffff; // extract low halfword\n        t = ::max(r, s);    // maximum of low halfwords\n        r = a & 0xffff0000; // extract high halfword\n        s = b & 0xffff0000; // extract high halfword\n        u = ::max(r, s);    // maximum of high halfwords\n        r = t | u;          // combine halfword maximums\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vmin2(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vmin2.u32.u32.u32 %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vmin.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        
asm(\"vmin.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s, t, u;\n        r = a & 0x0000ffff; // extract low halfword\n        s = b & 0x0000ffff; // extract low halfword\n        t = ::min(r, s);    // minimum of low halfwords\n        r = a & 0xffff0000; // extract high halfword\n        s = b & 0xffff0000; // extract high halfword\n        u = ::min(r, s);    // minimum of high halfwords\n        r = t | u;          // combine halfword minimums\n    #endif\n\n        return r;\n    }\n\n    // 4\n\n    static __device__ __forceinline__ unsigned int vadd4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vadd4.u32.u32.u32.sat %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vadd.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vadd.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vadd.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vadd.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s, t;\n        s = a ^ b;          // sum bits\n        r = a & 0x7f7f7f7f; // clear msbs\n        t = b & 0x7f7f7f7f; // clear msbs\n        s = s & 0x80808080; // msb sum bits\n        r = r + t;          // add without msbs, record carry-out in msbs\n        r = r ^ s;          // sum of msb sum and carry-in bits, w/o carry-out\n    #endif /* __CUDA_ARCH__ >= 300 */\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsub4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vsub4.u32.u32.u32.sat %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vsub.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vsub.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vsub.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vsub.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s, t;\n        s = a ^ ~b;         // inverted sum bits\n        r = a | 0x80808080; // set msbs\n        t = b & 0x7f7f7f7f; // clear msbs\n        s = s & 0x80808080; // inverted msb sum bits\n        r = r - t;          // subtract w/o msbs, record inverted borrows in msb\n        r = r ^ s;          // combine inverted msb sum bits and borrows\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vavg4(unsigned int a, unsigned int b)\n    {\n        unsigned int r, s;\n\n        // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==>\n        // (a + b) / 2 = (a & b) + ((a ^ b) >> 1)\n        s = a ^ b;\n        r = a & b;\n        s = s & 0xfefefefe; // ensure following shift doesn't cross byte boundaries\n        s = s >> 1;\n        s = r + s;\n\n        return s;\n    }\n\n    static __device__ __forceinline__ unsigned int vavrg4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vavrg4.u32.u32.u32 %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), 
\"r\"(b), \"r\"(r));\n    #else\n        // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==>\n        // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1)\n        unsigned int c;\n        c = a ^ b;\n        r = a | b;\n        c = c & 0xfefefefe; // ensure following shift doesn't cross byte boundaries\n        c = c >> 1;\n        r = r - c;\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vseteq4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset4.u32.u32.eq %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        unsigned int c;\n        r = a ^ b;          // 0x00 if a == b\n        c = r | 0x80808080; // set msbs, to catch carry out\n        r = r ^ c;          // extract msbs, msb = 1 if r < 0x80\n        c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80\n        c = r & ~c;         // msb = 1, if r was 0x00\n        r = c >> 7;         // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmpeq4(unsigned int a, unsigned int b)\n    {\n        unsigned int r, t;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vseteq4(a, b);\n        t = r << 8;         // convert bool\n        r = t - r;          //  to mask\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        t = a ^ b;          // 0x00 if a == b\n        r = t | 0x80808080; // set msbs, to catch carry out\n        t = t ^ r;          // extract msbs, msb = 1 if t < 0x80\n        r = r - 0x01010101; // msb = 0, if t was 0x00 or 0x80\n        r = t & ~r;         // msb = 1, if t was 0x00\n        t = r >> 7;         // build mask\n        t = r - t;          //  from\n        r = t | r;          //   msbs\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetle4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset4.u32.u32.le %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        c = vavrg4(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2\n        c = c & 0x80808080; // msb = carry-outs\n        r = c >> 7;         // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmple4(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetle4(a, b);\n        c = r << 8;         // convert bool\n        r = c - r;          //  to mask\n    #else\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        c = vavrg4(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2\n        c = c & 0x80808080; // msbs = carry-outs\n        r = c >> 7;         // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetlt4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset4.u32.u32.lt %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        
c = vavg4(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]\n        c = c & 0x80808080; // msb = carry-outs\n        r = c >> 7;         // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmplt4(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetlt4(a, b);\n        c = r << 8;         // convert bool\n        r = c - r;          //  to mask\n    #else\n        asm(\"not.b32 %0, %0;\" : \"+r\"(a));\n        c = vavg4(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]\n        c = c & 0x80808080; // msbs = carry-outs\n        r = c >> 7;         // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetge4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset4.u32.u32.ge %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(b));\n        c = vavrg4(a, b);   // (a + ~b + 1) / 2 = (a - b) / 2\n        c = c & 0x80808080; // msb = carry-outs\n        r = c >> 7;         // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmpge4(unsigned int a, unsigned int b)\n    {\n        unsigned int r, s;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetge4(a, b);\n        s = r << 8;         // convert bool\n        r = s - r;          //  to mask\n    #else\n        asm (\"not.b32 %0,%0;\" : \"+r\"(b));\n        r = vavrg4 (a, b);  // (a + ~b + 1) / 2 = (a - b) / 2\n        r = r & 0x80808080; // msb = carry-outs\n        s = r >> 7;         // build mask\n        s = r - s;          //  from\n        r = s | r;          //   msbs\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetgt4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset4.u32.u32.gt %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int c;\n        asm(\"not.b32 %0, %0;\" : \"+r\"(b));\n        c = vavg4(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]\n        c = c & 0x80808080; // msb = carry-outs\n        r = c >> 7;         // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmpgt4(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetgt4(a, b);\n        c = r << 8;         // convert bool\n        r = c - r;          //  to mask\n    #else\n        asm(\"not.b32 %0, %0;\" : \"+r\"(b));\n        c = vavg4(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]\n        c = c & 0x80808080; // msb = carry-outs\n        r = c >> 7;         // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vsetne4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vset4.u32.u32.ne %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        unsigned 
int c;\n        r = a ^ b;          // 0x00 if a == b\n        c = r | 0x80808080; // set msbs, to catch carry out\n        c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80\n        c = r | c;          // msb = 1, if r was not 0x00\n        c = c & 0x80808080; // extract msbs\n        r = c >> 7;         // convert to bool\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vcmpne4(unsigned int a, unsigned int b)\n    {\n        unsigned int r, c;\n\n    #if __CUDA_ARCH__ >= 300\n        r = vsetne4(a, b);\n        c = r << 8;         // convert bool\n        r = c - r;          //  to mask\n    #else\n        // inspired by Alan Mycroft's null-byte detection algorithm:\n        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))\n        r = a ^ b;          // 0x00 if a == b\n        c = r | 0x80808080; // set msbs, to catch carry out\n        c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80\n        c = r | c;          // msb = 1, if r was not 0x00\n        c = c & 0x80808080; // extract msbs\n        r = c >> 7;         // convert\n        r = c - r;          //  msbs to\n        r = c | r;          //   mask\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vabsdiff4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vabsdiff4.u32.u32.u32.sat %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vabsdiff.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vabsdiff.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vabsdiff.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vabsdiff.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s;\n        s = vcmpge4(a, b);  // mask = 0xff if a >= b\n        r = a ^ b;          //\n        s = (r &  s) ^ b;   // select a when a >= b, else select b => max(a,b)\n        r = s ^ r;          // select a when b >= a, else select b => min(a,b)\n        r = s - r;          // |a - b| = max(a,b) - min(a,b);\n    #endif\n\n        return r;\n    }\n\n    static __device__ __forceinline__ unsigned int vmax4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 300\n        asm(\"vmax4.u32.u32.u32 %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vmax.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vmax.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vmax.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vmax.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s;\n        s = vcmpge4(a, b);  // mask = 0xff if a >= b\n        r = a & s;          // select a when a >= b\n        s = b & ~s;         // select b when a < b\n        r = r | s;          // combine byte selections\n    #endif\n\n        return r;           // byte-wise unsigned maximum\n    }\n\n    static __device__ __forceinline__ unsigned int vmin4(unsigned int a, unsigned int b)\n    {\n        unsigned int r = 0;\n\n    #if __CUDA_ARCH__ >= 
300\n        asm(\"vmin4.u32.u32.u32 %0, %1, %2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #elif __CUDA_ARCH__ >= 200\n        asm(\"vmin.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vmin.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vmin.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n        asm(\"vmin.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;\" : \"=r\"(r) : \"r\"(a), \"r\"(b), \"r\"(r));\n    #else\n        unsigned int s;\n        s = vcmpge4(b, a);  // mask = 0xff if b >= a\n        r = a & s;          // select a when b >= a\n        s = b & ~s;         // select b when b < a\n        r = r | s;          // combine byte selections\n    #endif\n\n        return r;\n    }\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_SIMD_FUNCTIONS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/transform.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_TRANSFORM_HPP__\n#define __OPENCV_CUDA_TRANSFORM_HPP__\n\n#include \"common.hpp\"\n#include \"utility.hpp\"\n#include \"detail/transform_detail.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template <typename T, typename D, typename UnOp, typename Mask>\n    static inline void transform(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, const Mask& mask, cudaStream_t stream)\n    {\n        typedef TransformFunctorTraits<UnOp> ft;\n        transform_detail::TransformDispatcher<VecTraits<T>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src, dst, op, mask, stream);\n    }\n\n    template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\n    static inline void transform(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, const Mask& mask, cudaStream_t stream)\n    {\n        typedef TransformFunctorTraits<BinOp> ft;\n        transform_detail::TransformDispatcher<VecTraits<T1>::cn == 1 && VecTraits<T2>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src1, src2, dst, op, mask, stream);\n    }\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_TRANSFORM_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/type_traits.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_TYPE_TRAITS_HPP__\n#define __OPENCV_CUDA_TYPE_TRAITS_HPP__\n\n#include \"detail/type_traits_detail.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template <typename T> struct IsSimpleParameter\n    {\n        enum {value = type_traits_detail::IsIntegral<T>::value || type_traits_detail::IsFloat<T>::value ||\n            type_traits_detail::PointerTraits<typename type_traits_detail::ReferenceTraits<T>::type>::value};\n    };\n\n    template <typename T> struct TypeTraits\n    {\n        typedef typename type_traits_detail::UnConst<T>::type                                                NonConstType;\n        typedef typename type_traits_detail::UnVolatile<T>::type                                             NonVolatileType;\n        typedef typename type_traits_detail::UnVolatile<typename type_traits_detail::UnConst<T>::type>::type UnqualifiedType;\n        typedef typename type_traits_detail::PointerTraits<UnqualifiedType>::type                            PointeeType;\n        typedef typename type_traits_detail::ReferenceTraits<T>::type                                        ReferredType;\n\n        enum { isConst          = type_traits_detail::UnConst<T>::value };\n        enum { isVolatile       = type_traits_detail::UnVolatile<T>::value };\n\n        enum { isReference      = type_traits_detail::ReferenceTraits<UnqualifiedType>::value };\n        enum { isPointer        = type_traits_detail::PointerTraits<typename type_traits_detail::ReferenceTraits<UnqualifiedType>::type>::value };\n\n        enum { isUnsignedInt    = type_traits_detail::IsUnsignedIntegral<UnqualifiedType>::value };\n        enum { isSignedInt      = type_traits_detail::IsSignedIntergral<UnqualifiedType>::value };\n        enum { isIntegral       = type_traits_detail::IsIntegral<UnqualifiedType>::value };\n        enum { isFloat          = type_traits_detail::IsFloat<UnqualifiedType>::value };\n        enum { isArith          = isIntegral || isFloat };\n        enum { isVec            = type_traits_detail::IsVec<UnqualifiedType>::value };\n\n        typedef typename type_traits_detail::Select<IsSimpleParameter<UnqualifiedType>::value,\n            T, typename type_traits_detail::AddParameterType<T>::type>::type ParameterType;\n    };\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_TYPE_TRAITS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/utility.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_UTILITY_HPP__\n#define __OPENCV_CUDA_UTILITY_HPP__\n\n#include \"saturate_cast.hpp\"\n#include \"datamov_utils.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    #define OPENCV_CUDA_LOG_WARP_SIZE        (5)\n    #define OPENCV_CUDA_WARP_SIZE            (1 << OPENCV_CUDA_LOG_WARP_SIZE)\n    #define OPENCV_CUDA_LOG_MEM_BANKS        ((__CUDA_ARCH__ >= 200) ? 
5 : 4) // 32 banks on fermi, 16 on tesla\n    #define OPENCV_CUDA_MEM_BANKS            (1 << OPENCV_CUDA_LOG_MEM_BANKS)\n\n    ///////////////////////////////////////////////////////////////////////////////\n    // swap\n\n    template <typename T> void __device__ __host__ __forceinline__ swap(T& a, T& b)\n    {\n        const T temp = a;\n        a = b;\n        b = temp;\n    }\n\n    ///////////////////////////////////////////////////////////////////////////////\n    // Mask Reader\n\n    struct SingleMask\n    {\n        explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {}\n        __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_): mask(mask_.mask){}\n\n        __device__ __forceinline__ bool operator()(int y, int x) const\n        {\n            return mask.ptr(y)[x] != 0;\n        }\n\n        PtrStepb mask;\n    };\n\n    struct SingleMaskChannels\n    {\n        __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_)\n        : mask(mask_), channels(channels_) {}\n        __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_)\n            :mask(mask_.mask), channels(mask_.channels){}\n\n        __device__ __forceinline__ bool operator()(int y, int x) const\n        {\n            return mask.ptr(y)[x / channels] != 0;\n        }\n\n        PtrStepb mask;\n        int channels;\n    };\n\n    struct MaskCollection\n    {\n        explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_)\n            : maskCollection(maskCollection_) {}\n\n        __device__ __forceinline__ MaskCollection(const MaskCollection& masks_)\n            : maskCollection(masks_.maskCollection), curMask(masks_.curMask){}\n\n        __device__ __forceinline__ void next()\n        {\n            curMask = *maskCollection++;\n        }\n        __device__ __forceinline__ void setMask(int z)\n        {\n            curMask = maskCollection[z];\n        }\n\n        __device__ __forceinline__ bool operator()(int y, int x) const\n        {\n            uchar val;\n            return curMask.data == 0 || (ForceGlob<uchar>::Load(curMask.ptr(y), x, val), (val != 0));\n        }\n\n        const PtrStepb* maskCollection;\n        PtrStepb curMask;\n    };\n\n    struct WithOutMask\n    {\n        __host__ __device__ __forceinline__ WithOutMask(){}\n        __host__ __device__ __forceinline__ WithOutMask(const WithOutMask&){}\n\n        __device__ __forceinline__ void next() const\n        {\n        }\n        __device__ __forceinline__ void setMask(int) const\n        {\n        }\n\n        __device__ __forceinline__ bool operator()(int, int) const\n        {\n            return true;\n        }\n\n        __device__ __forceinline__ bool operator()(int, int, int) const\n        {\n            return true;\n        }\n\n        static __device__ __forceinline__ bool check(int, int)\n        {\n            return true;\n        }\n\n        static __device__ __forceinline__ bool check(int, int, int)\n        {\n            return true;\n        }\n    };\n\n    ///////////////////////////////////////////////////////////////////////////////\n    // Solve linear system\n\n    // solve 2x2 linear system Ax=b\n    template <typename T> __device__ __forceinline__ bool solve2x2(const T A[2][2], const T b[2], T x[2])\n    {\n        T det = A[0][0] * A[1][1] - A[1][0] * A[0][1];\n\n        if (det != 0)\n        {\n            double invdet = 1.0 / det;\n\n            x[0] = 
saturate_cast<T>(invdet * (b[0] * A[1][1] - b[1] * A[0][1]));\n\n            x[1] = saturate_cast<T>(invdet * (A[0][0] * b[1] - A[1][0] * b[0]));\n\n            return true;\n        }\n\n        return false;\n    }\n\n    // solve 3x3 linear system Ax=b\n    template <typename T> __device__ __forceinline__ bool solve3x3(const T A[3][3], const T b[3], T x[3])\n    {\n        T det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1])\n              - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0])\n              + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]);\n\n        if (det != 0)\n        {\n            double invdet = 1.0 / det;\n\n            x[0] = saturate_cast<T>(invdet *\n                (b[0]    * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) -\n                 A[0][1] * (b[1]    * A[2][2] - A[1][2] * b[2]   ) +\n                 A[0][2] * (b[1]    * A[2][1] - A[1][1] * b[2]   )));\n\n            x[1] = saturate_cast<T>(invdet *\n                (A[0][0] * (b[1]    * A[2][2] - A[1][2] * b[2]   ) -\n                 b[0]    * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) +\n                 A[0][2] * (A[1][0] * b[2]    - b[1]    * A[2][0])));\n\n            x[2] = saturate_cast<T>(invdet *\n                (A[0][0] * (A[1][1] * b[2]    - b[1]    * A[2][1]) -\n                 A[0][1] * (A[1][0] * b[2]    - b[1]    * A[2][0]) +\n                 b[0]    * (A[1][0] * A[2][1] - A[1][1] * A[2][0])));\n\n            return true;\n        }\n\n        return false;\n    }\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_UTILITY_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/vec_distance.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_VEC_DISTANCE_HPP__\n#define __OPENCV_CUDA_VEC_DISTANCE_HPP__\n\n#include \"reduce.hpp\"\n#include \"functional.hpp\"\n#include \"detail/vec_distance_detail.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template <typename T> struct L1Dist\n    {\n        typedef int value_type;\n        typedef int result_type;\n\n        __device__ __forceinline__ L1Dist() : mySum(0) {}\n\n        __device__ __forceinline__ void reduceIter(int val1, int val2)\n        {\n            mySum = __sad(val1, val2, mySum);\n        }\n\n        template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(int* smem, int tid)\n        {\n            reduce<THREAD_DIM>(smem, mySum, tid, plus<int>());\n        }\n\n        __device__ __forceinline__ operator int() const\n        {\n            return mySum;\n        }\n\n        int mySum;\n    };\n    template <> struct L1Dist<float>\n    {\n        typedef float value_type;\n        typedef float result_type;\n\n        __device__ __forceinline__ L1Dist() : mySum(0.0f) {}\n\n        __device__ __forceinline__ void reduceIter(float val1, float val2)\n        {\n            mySum += ::fabs(val1 - val2);\n        }\n\n        template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(float* smem, int tid)\n        {\n            reduce<THREAD_DIM>(smem, mySum, tid, plus<float>());\n        }\n\n        __device__ __forceinline__ operator float() const\n        {\n            return mySum;\n        }\n\n        float mySum;\n    };\n\n    struct L2Dist\n    {\n        typedef float value_type;\n        typedef float result_type;\n\n        __device__ __forceinline__ L2Dist() : mySum(0.0f) {}\n\n        __device__ __forceinline__ void reduceIter(float val1, float val2)\n        {\n            float reg = val1 - val2;\n            mySum += reg * reg;\n        }\n\n        template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(float* smem, int tid)\n        {\n            reduce<THREAD_DIM>(smem, mySum, tid, plus<float>());\n        }\n\n        __device__ __forceinline__ operator float() const\n        {\n            return sqrtf(mySum);\n        }\n\n        float mySum;\n    };\n\n    struct HammingDist\n    {\n        typedef int value_type;\n        typedef int result_type;\n\n        __device__ __forceinline__ HammingDist() : mySum(0) {}\n\n        __device__ __forceinline__ void reduceIter(int val1, int val2)\n        {\n            mySum += __popc(val1 ^ val2);\n        }\n\n        template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(int* smem, int tid)\n        {\n            reduce<THREAD_DIM>(smem, mySum, tid, plus<int>());\n        }\n\n        __device__ __forceinline__ operator int() const\n        {\n            return mySum;\n        }\n\n        int mySum;\n    };\n\n    // calc distance between two vectors in global memory\n    template <int THREAD_DIM, typename Dist, typename T1, typename T2>\n    __device__ void calcVecDiffGlobal(const T1* vec1, const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid)\n    {\n        for (int i = tid; i < len; i += THREAD_DIM)\n        {\n            T1 val1;\n            ForceGlob<T1>::Load(vec1, i, val1);\n\n            T2 val2;\n            ForceGlob<T2>::Load(vec2, i, val2);\n\n            dist.reduceIter(val1, val2);\n        }\n\n        dist.reduceAll<THREAD_DIM>(smem, tid);\n    }\n\n    // calc distance between two vectors, first vector is cached in register or shared memory, second vector is in global memory\n    template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T1, typename T2>\n    __device__ __forceinline__ void 
calcVecDiffCached(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, typename Dist::result_type* smem, int tid)\n    {\n        vec_distance_detail::VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, LEN_EQ_MAX_LEN>::calc(vecCached, vecGlob, len, dist, tid);\n\n        dist.reduceAll<THREAD_DIM>(smem, tid);\n    }\n\n    // calc distance between two vectors in global memory\n    template <int THREAD_DIM, typename T1> struct VecDiffGlobal\n    {\n        explicit __device__ __forceinline__ VecDiffGlobal(const T1* vec1_, int = 0, void* = 0, int = 0, int = 0)\n        {\n            vec1 = vec1_;\n        }\n\n        template <typename T2, typename Dist>\n        __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const\n        {\n            calcVecDiffGlobal<THREAD_DIM>(vec1, vec2, len, dist, smem, tid);\n        }\n\n        const T1* vec1;\n    };\n\n    // calc distance between two vectors, first vector is cached in register memory, second vector is in global memory\n    template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename U> struct VecDiffCachedRegister\n    {\n        template <typename T1> __device__ __forceinline__ VecDiffCachedRegister(const T1* vec1, int len, U* smem, int glob_tid, int tid)\n        {\n            if (glob_tid < len)\n                smem[glob_tid] = vec1[glob_tid];\n            __syncthreads();\n\n            U* vec1ValsPtr = vec1Vals;\n\n            #pragma unroll\n            for (int i = tid; i < MAX_LEN; i += THREAD_DIM)\n                *vec1ValsPtr++ = smem[i];\n\n            __syncthreads();\n        }\n\n        template <typename T2, typename Dist>\n        __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const\n        {\n            calcVecDiffCached<THREAD_DIM, MAX_LEN, LEN_EQ_MAX_LEN>(vec1Vals, vec2, len, dist, smem, tid);\n        }\n\n        U vec1Vals[MAX_LEN / THREAD_DIM];\n    };\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_VEC_DISTANCE_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/vec_math.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_VECMATH_HPP__\n#define __OPENCV_CUDA_VECMATH_HPP__\n\n#include \"vec_traits.hpp\"\n#include \"saturate_cast.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n\n// saturate_cast\n\nnamespace vec_math_detail\n{\n    template <int cn, typename VecD> struct SatCastHelper;\n    template <typename VecD> struct SatCastHelper<1, VecD>\n    {\n        template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)\n        {\n            typedef typename VecTraits<VecD>::elem_type D;\n            return VecTraits<VecD>::make(saturate_cast<D>(v.x));\n        }\n    };\n    template <typename VecD> struct SatCastHelper<2, VecD>\n    {\n        template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)\n        {\n            typedef typename VecTraits<VecD>::elem_type D;\n            return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y));\n        }\n    };\n    template <typename VecD> struct SatCastHelper<3, VecD>\n    {\n        template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)\n        {\n            typedef typename VecTraits<VecD>::elem_type D;\n            return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y), saturate_cast<D>(v.z));\n        }\n    };\n    template <typename VecD> struct SatCastHelper<4, VecD>\n    {\n        template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)\n        {\n            typedef typename VecTraits<VecD>::elem_type D;\n            return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y), saturate_cast<D>(v.z), saturate_cast<D>(v.w));\n        }\n    };\n\n    template <typename VecD, typename VecS> static __device__ __forceinline__ VecD saturate_cast_helper(const VecS& v)\n    {\n        return SatCastHelper<VecTraits<VecD>::cn, VecD>::cast(v);\n    }\n}\n\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const uchar1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const char1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const ushort1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const short1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const uint1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const int1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const float1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const double1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\n\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const uchar2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const char2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const ushort2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const short2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ 
__forceinline__ T saturate_cast(const uint2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const int2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const float2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const double2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\n\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const uchar3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const char3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const ushort3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const short3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const uint3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const int3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const float3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const double3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\n\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const uchar4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const char4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const ushort4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const short4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const uint4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const int4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const float4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\ntemplate<typename T> static __device__ __forceinline__ T saturate_cast(const double4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}\n\n// unary operators\n\n#define CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(op, input_type, output_type) \\\n    __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(op (a.x)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(op (a.x), op (a.y)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a) \\\n    { \\\n        return VecTraits<output_type ## 3>::make(op (a.x), op (a.y), op (a.z)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & 
a) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(op (a.x), op (a.y), op (a.z), op (a.w)); \\\n    }\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, char, char)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, short, short)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, int, int)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, char, char)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, ushort, ushort)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, short, short)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, int, int)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uint, uint)\n\n#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_OP\n\n// unary functions\n\n#define CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(func_name, func, input_type, output_type) \\\n    __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(func (a.x)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(func (a.x), func (a.y)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a) \\\n    { \\\n        return VecTraits<output_type ## 3>::make(func (a.x), func (a.y), func (a.z)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(func (a.x), func (a.y), func (a.z), func (a.w)); \\\n    }\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, char, char)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, ushort, ushort)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, short, short)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, int, int)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uint, uint)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabsf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabs, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrt, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::exp, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uchar, 
float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::log, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sin, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cos, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, 
::tanf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tan, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asin, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acos, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atan, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinh, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::cosh, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanh, double, 
double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinh, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acosh, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanh, double, double)\n\n#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC\n\n// binary operators (vec & vec)\n\n#define CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(op, input_type, output_type) \\\n    __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, const input_type ## 1 & b) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(a.x op b.x); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, const input_type ## 2 & b) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(a.x op b.x, a.y op b.y); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, const input_type ## 3 & b) \\\n    { \\\n        return VecTraits<output_type ## 3>::make(a.x op b.x, a.y op b.y, a.z op b.z); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, const input_type ## 4 & b) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(a.x op b.x, a.y op b.y, a.z op b.z, a.w op b.w); \\\n    }\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uchar, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, char, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, ushort, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, short, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uint, uint)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uchar, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, char, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, ushort, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, short, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uint, uint)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uchar, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, char, 
int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, ushort, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, short, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uint, uint)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uchar, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, char, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, ushort, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, short, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uint, uint)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, float, 
uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, char, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, ushort, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, short, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, int, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uint, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, float, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, double, uchar)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, char, char)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, ushort, ushort)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, short, short)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uint, uint)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, char, char)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, ushort, ushort)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, short, short)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uint, uint)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, char, char)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, ushort, ushort)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, short, short)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uint, uint)\n\n#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_OP\n\n// binary operators (vec & scalar)\n\n#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(op, input_type, scalar_type, output_type) \\\n    __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(a.x op s); \\\n    } \\\n    __device__ __forceinline__ output_type ## 1 operator op(scalar_type s, const input_type ## 1 & b) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(s op b.x); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(a.x op s, a.y op s); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 operator op(scalar_type s, const input_type ## 2 & b) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(s op b.x, s op b.y); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 3>::make(a.x op s, a.y op s, a.z op s); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 operator op(scalar_type s, const input_type ## 3 & b) \\\n    { \\\n        return VecTraits<output_type ## 3>::make(s op b.x, s op b.y, s op b.z); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(a.x op s, a.y op s, a.z op s, a.w op s); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 operator op(scalar_type s, const input_type ## 4 & b) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(s op b.x, s op b.y, s op b.z, s op b.w); \\\n    }\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, double, 
double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, uint, uint)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, double, double, double)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, uint, uint)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, double, double, double)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, uint, uint)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, double, double, double)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, 
uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, uint, uint)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, double, double, double)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, float, float, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, float, float, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, float, float, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, float, float, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, float, float, 
uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, float, float, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, float, float, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, char, char, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, ushort, ushort, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, short, short, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, int, int, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uint, uint, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, float, float, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, double, double, uchar)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, char, char, char)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, ushort, ushort, ushort)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, short, short, short)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uint, uint, uint)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, char, char, char)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, ushort, ushort, ushort)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, short, short, short)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uint, uint, uint)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, char, char, char)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, ushort, ushort, ushort)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, short, short, short)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uint, uint, uint)\n\n#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP\n\n// binary function (vec & vec)\n\n#define CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(func_name, func, input_type, output_type) \\\n    __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, const input_type ## 1 & b) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(func (a.x, b.x)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, const input_type ## 2 & b) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(func (a.x, b.x), func (a.y, b.y)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, const input_type ## 3 & b) \\\n    { \\\n        return VecTraits<output_type ## 3>::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 
& a, const input_type ## 4 & b) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z), func (a.w, b.w)); \\\n    }\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, char, char)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, ushort, ushort)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, short, short)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uint, uint)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmaxf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmax, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uchar, uchar)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, char, char)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, ushort, ushort)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, short, short)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uint, uint)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, int, int)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fminf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fmin, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, char, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, short, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, int, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypot, double, double)\n\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uchar, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, char, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, ushort, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, short, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uint, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, int, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, float, float)\nCV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2, double, double)\n\n#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC\n\n// binary function (vec & scalar)\n\n#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(func_name, func, input_type, scalar_type, output_type) \\\n    __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(func ((output_type) a.x, (output_type) s)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 1 func_name(scalar_type s, const input_type ## 1 & b) \\\n    { \\\n        return VecTraits<output_type ## 1>::make(func ((output_type) s, (output_type) b.x)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 2 func_name(scalar_type s, const input_type ## 2 & b) \\\n    { \\\n        return VecTraits<output_type ## 2>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 
3>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 3 func_name(scalar_type s, const input_type ## 3 & b) \\\n    { \\\n        return VecTraits<output_type ## 3>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, scalar_type s) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s), func ((output_type) a.w, (output_type) s)); \\\n    } \\\n    __device__ __forceinline__ output_type ## 4 func_name(scalar_type s, const input_type ## 4 & b) \\\n    { \\\n        return VecTraits<output_type ## 4>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z), func ((output_type) s, (output_type) b.w)); \\\n    }\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, char, char, char)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, char, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, ushort, ushort, ushort)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, short, short, short)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uint, uint, uint)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, double, double, double)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uchar, uchar, uchar)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, char, char, char)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, char, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, ushort, ushort, ushort)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, short, short, short)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, 
::fmin, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uint, uint, uint)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, int, int, int)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, double, double, double)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, char, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, double, double, double)\n\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uchar, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uchar, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, char, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, char, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, ushort, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, ushort, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, short, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, short, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uint, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uint, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, int, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, int, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, float, float, float)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, float, double, double)\nCV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, double, double, double)\n\n#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC\n\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_VECMATH_HPP__\n"
  },
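  {
    "path": "docs/examples/vec_math_usage_sketch.cu",
    "content": "// Illustrative usage sketch, NOT part of the original sources: exercises the\n// element-wise vector operators and functions generated by the macros in the\n// preceding header (operator*, operator> and max for the built-in CUDA vector\n// types). The file name and kernel are hypothetical, and the include path is\n// assumed from the header guard __OPENCV_CUDA_VECMATH_HPP__.\n#include <opencv2/core/cuda/vecmath.hpp>\n\nusing namespace cv::cuda::device;\n\n__global__ void vec_ops_demo(const uchar3* a, const uchar3* b, int3* prod, uchar3* mask, uchar3* hi, int n)\n{\n    const int i = blockIdx.x * blockDim.x + threadIdx.x;\n    if (i >= n) return;\n    prod[i] = a[i] * b[i];     // uchar3 * uchar3 -> int3 (widened result type)\n    mask[i] = a[i] > b[i];     // per-lane comparison -> uchar3 holding 0/1\n    hi[i]   = max(a[i], b[i]); // per-lane ::max via the binary-function macro\n}\n"
  },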
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/vec_traits.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_VEC_TRAITS_HPP__\n#define __OPENCV_CUDA_VEC_TRAITS_HPP__\n\n#include \"common.hpp\"\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template<typename T, int N> struct TypeVec;\n\n    struct __align__(8) uchar8\n    {\n        uchar a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ uchar8 make_uchar8(uchar a0, uchar a1, uchar a2, uchar a3, uchar a4, uchar a5, uchar a6, uchar a7)\n    {\n        uchar8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n    struct __align__(8) char8\n    {\n        schar a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ char8 make_char8(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7)\n    {\n        char8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n    struct __align__(16) ushort8\n    {\n        ushort a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ ushort8 make_ushort8(ushort a0, ushort a1, ushort a2, ushort a3, ushort a4, ushort a5, ushort a6, ushort a7)\n    {\n        ushort8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n    struct __align__(16) short8\n    {\n        short a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ short8 make_short8(short a0, short a1, short a2, short a3, short a4, short a5, short a6, short a7)\n    {\n        short8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n    struct __align__(32) uint8\n    {\n        uint a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ uint8 make_uint8(uint a0, uint a1, uint a2, uint a3, uint a4, uint a5, uint a6, uint a7)\n    {\n        uint8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n    struct __align__(32) int8\n    {\n        int a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ int8 make_int8(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7)\n    {\n        int8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n    struct __align__(32) float8\n    {\n        float a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ float8 make_float8(float a0, float a1, float a2, float a3, float a4, float a5, float a6, float a7)\n    {\n        float8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n    struct double8\n    {\n        double a0, a1, a2, a3, a4, a5, a6, a7;\n    };\n    static __host__ __device__ __forceinline__ double8 make_double8(double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7)\n    {\n        double8 val = {a0, a1, a2, a3, a4, a5, a6, a7};\n        return val;\n    }\n\n#define OPENCV_CUDA_IMPLEMENT_TYPE_VEC(type) \\\n    template<> struct TypeVec<type, 1> { typedef type vec_type; }; \\\n    template<> struct TypeVec<type ## 1, 1> { typedef type ## 1 vec_type; }; \\\n    template<> struct TypeVec<type, 2> { typedef type ## 2 vec_type; }; \\\n    template<> struct TypeVec<type ## 2, 2> { typedef type ## 2 vec_type; }; \\\n    template<> struct TypeVec<type, 3> { typedef type ## 3 vec_type; }; \\\n    template<> struct TypeVec<type ## 3, 3> { typedef type ## 3 vec_type; }; \\\n    template<> struct TypeVec<type, 4> { typedef type ## 4 vec_type; }; \\\n    template<> struct TypeVec<type ## 4, 4> { typedef type ## 4 vec_type; }; \\\n    template<> struct TypeVec<type, 8> { typedef type ## 8 vec_type; }; \\\n    template<> struct TypeVec<type ## 8, 8> { 
typedef type ## 8 vec_type; };\n\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uchar)\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(char)\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(ushort)\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(short)\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(int)\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uint)\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(float)\n    OPENCV_CUDA_IMPLEMENT_TYPE_VEC(double)\n\n    #undef OPENCV_CUDA_IMPLEMENT_TYPE_VEC\n\n    template<> struct TypeVec<schar, 1> { typedef schar vec_type; };\n    template<> struct TypeVec<schar, 2> { typedef char2 vec_type; };\n    template<> struct TypeVec<schar, 3> { typedef char3 vec_type; };\n    template<> struct TypeVec<schar, 4> { typedef char4 vec_type; };\n    template<> struct TypeVec<schar, 8> { typedef char8 vec_type; };\n\n    template<> struct TypeVec<bool, 1> { typedef uchar vec_type; };\n    template<> struct TypeVec<bool, 2> { typedef uchar2 vec_type; };\n    template<> struct TypeVec<bool, 3> { typedef uchar3 vec_type; };\n    template<> struct TypeVec<bool, 4> { typedef uchar4 vec_type; };\n    template<> struct TypeVec<bool, 8> { typedef uchar8 vec_type; };\n\n    template<typename T> struct VecTraits;\n\n#define OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(type) \\\n    template<> struct VecTraits<type> \\\n    { \\\n        typedef type elem_type; \\\n        enum {cn=1}; \\\n        static __device__ __host__ __forceinline__ type all(type v) {return v;} \\\n        static __device__ __host__ __forceinline__ type make(type x) {return x;} \\\n        static __device__ __host__ __forceinline__ type make(const type* v) {return *v;} \\\n    }; \\\n    template<> struct VecTraits<type ## 1> \\\n    { \\\n        typedef type elem_type; \\\n        enum {cn=1}; \\\n        static __device__ __host__ __forceinline__ type ## 1 all(type v) {return make_ ## type ## 1(v);} \\\n        static __device__ __host__ __forceinline__ type ## 1 make(type x) {return make_ ## type ## 1(x);} \\\n        static __device__ __host__ __forceinline__ type ## 1 make(const type* v) {return make_ ## type ## 1(*v);} \\\n    }; \\\n    template<> struct VecTraits<type ## 2> \\\n    { \\\n        typedef type elem_type; \\\n        enum {cn=2}; \\\n        static __device__ __host__ __forceinline__ type ## 2 all(type v) {return make_ ## type ## 2(v, v);} \\\n        static __device__ __host__ __forceinline__ type ## 2 make(type x, type y) {return make_ ## type ## 2(x, y);} \\\n        static __device__ __host__ __forceinline__ type ## 2 make(const type* v) {return make_ ## type ## 2(v[0], v[1]);} \\\n    }; \\\n    template<> struct VecTraits<type ## 3> \\\n    { \\\n        typedef type elem_type; \\\n        enum {cn=3}; \\\n        static __device__ __host__ __forceinline__ type ## 3 all(type v) {return make_ ## type ## 3(v, v, v);} \\\n        static __device__ __host__ __forceinline__ type ## 3 make(type x, type y, type z) {return make_ ## type ## 3(x, y, z);} \\\n        static __device__ __host__ __forceinline__ type ## 3 make(const type* v) {return make_ ## type ## 3(v[0], v[1], v[2]);} \\\n    }; \\\n    template<> struct VecTraits<type ## 4> \\\n    { \\\n        typedef type elem_type; \\\n        enum {cn=4}; \\\n        static __device__ __host__ __forceinline__ type ## 4 all(type v) {return make_ ## type ## 4(v, v, v, v);} \\\n        static __device__ __host__ __forceinline__ type ## 4 make(type x, type y, type z, type w) {return make_ ## type ## 4(x, y, z, w);} \\\n        static __device__ __host__ __forceinline__ type ## 4 make(const type* v) {return make_ ## type 
## 4(v[0], v[1], v[2], v[3]);} \\\n    }; \\\n    template<> struct VecTraits<type ## 8> \\\n    { \\\n        typedef type elem_type; \\\n        enum {cn=8}; \\\n        static __device__ __host__ __forceinline__ type ## 8 all(type v) {return make_ ## type ## 8(v, v, v, v, v, v, v, v);} \\\n        static __device__ __host__ __forceinline__ type ## 8 make(type a0, type a1, type a2, type a3, type a4, type a5, type a6, type a7) {return make_ ## type ## 8(a0, a1, a2, a3, a4, a5, a6, a7);} \\\n        static __device__ __host__ __forceinline__ type ## 8 make(const type* v) {return make_ ## type ## 8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} \\\n    };\n\n    OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uchar)\n    OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(ushort)\n    OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(short)\n    OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(int)\n    OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uint)\n    OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(float)\n    OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(double)\n\n    #undef OPENCV_CUDA_IMPLEMENT_VEC_TRAITS\n\n    template<> struct VecTraits<char>\n    {\n        typedef char elem_type;\n        enum {cn=1};\n        static __device__ __host__ __forceinline__ char all(char v) {return v;}\n        static __device__ __host__ __forceinline__ char make(char x) {return x;}\n        static __device__ __host__ __forceinline__ char make(const char* x) {return *x;}\n    };\n    template<> struct VecTraits<schar>\n    {\n        typedef schar elem_type;\n        enum {cn=1};\n        static __device__ __host__ __forceinline__ schar all(schar v) {return v;}\n        static __device__ __host__ __forceinline__ schar make(schar x) {return x;}\n        static __device__ __host__ __forceinline__ schar make(const schar* x) {return *x;}\n    };\n    template<> struct VecTraits<char1>\n    {\n        typedef schar elem_type;\n        enum {cn=1};\n        static __device__ __host__ __forceinline__ char1 all(schar v) {return make_char1(v);}\n        static __device__ __host__ __forceinline__ char1 make(schar x) {return make_char1(x);}\n        static __device__ __host__ __forceinline__ char1 make(const schar* v) {return make_char1(v[0]);}\n    };\n    template<> struct VecTraits<char2>\n    {\n        typedef schar elem_type;\n        enum {cn=2};\n        static __device__ __host__ __forceinline__ char2 all(schar v) {return make_char2(v, v);}\n        static __device__ __host__ __forceinline__ char2 make(schar x, schar y) {return make_char2(x, y);}\n        static __device__ __host__ __forceinline__ char2 make(const schar* v) {return make_char2(v[0], v[1]);}\n    };\n    template<> struct VecTraits<char3>\n    {\n        typedef schar elem_type;\n        enum {cn=3};\n        static __device__ __host__ __forceinline__ char3 all(schar v) {return make_char3(v, v, v);}\n        static __device__ __host__ __forceinline__ char3 make(schar x, schar y, schar z) {return make_char3(x, y, z);}\n        static __device__ __host__ __forceinline__ char3 make(const schar* v) {return make_char3(v[0], v[1], v[2]);}\n    };\n    template<> struct VecTraits<char4>\n    {\n        typedef schar elem_type;\n        enum {cn=4};\n        static __device__ __host__ __forceinline__ char4 all(schar v) {return make_char4(v, v, v, v);}\n        static __device__ __host__ __forceinline__ char4 make(schar x, schar y, schar z, schar w) {return make_char4(x, y, z, w);}\n        static __device__ __host__ __forceinline__ char4 make(const schar* v) {return make_char4(v[0], v[1], v[2], v[3]);}\n    };\n    template<> struct 
VecTraits<char8>\n    {\n        typedef schar elem_type;\n        enum {cn=8};\n        static __device__ __host__ __forceinline__ char8 all(schar v) {return make_char8(v, v, v, v, v, v, v, v);}\n        static __device__ __host__ __forceinline__ char8 make(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) {return make_char8(a0, a1, a2, a3, a4, a5, a6, a7);}\n        static __device__ __host__ __forceinline__ char8 make(const schar* v) {return make_char8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);}\n    };\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_VEC_TRAITS_HPP__\n"
  },
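  {
    "path": "docs/examples/vec_traits_usage_sketch.cu",
    "content": "// Illustrative usage sketch, NOT part of the original sources: shows how\n// TypeVec picks the cn-channel CUDA vector type at compile time and how\n// VecTraits broadcasts a scalar across all channels. The file name and the\n// kernel are hypothetical.\n#include <opencv2/core/cuda/vec_traits.hpp>\n\nusing namespace cv::cuda::device;\n\ntemplate <typename T, int cn>\n__global__ void fill_kernel(typename TypeVec<T, cn>::vec_type* dst, T value, int n)\n{\n    typedef typename TypeVec<T, cn>::vec_type vec_type; // e.g. TypeVec<float, 3>::vec_type == float3\n    const int i = blockIdx.x * blockDim.x + threadIdx.x;\n    if (i < n)\n        dst[i] = VecTraits<vec_type>::all(value); // broadcast: make_float3(value, value, value)\n}\n\n// Typical launch: fill_kernel<float, 3><<<blocks, threads>>>(d_ptr, 0.5f, n);\n"
  },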
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/warp.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_DEVICE_WARP_HPP__\n#define __OPENCV_CUDA_DEVICE_WARP_HPP__\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    struct Warp\n    {\n        enum\n        {\n            LOG_WARP_SIZE = 5,\n            WARP_SIZE     = 1 << LOG_WARP_SIZE,\n            STRIDE        = WARP_SIZE\n        };\n\n        /** \\brief Returns the warp lane ID of the calling thread. 
*/\n        static __device__ __forceinline__ unsigned int laneId()\n        {\n            unsigned int ret;\n            asm(\"mov.u32 %0, %%laneid;\" : \"=r\"(ret) );\n            return ret;\n        }\n\n        template<typename It, typename T>\n        static __device__ __forceinline__ void fill(It beg, It end, const T& value)\n        {\n            for(It t = beg + laneId(); t < end; t += STRIDE)\n                *t = value;\n        }\n\n        template<typename InIt, typename OutIt>\n        static __device__ __forceinline__ OutIt copy(InIt beg, InIt end, OutIt out)\n        {\n            for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE)\n                *out = *t;\n            return out;\n        }\n\n        template<typename InIt, typename OutIt, class UnOp>\n        static __device__ __forceinline__ OutIt transform(InIt beg, InIt end, OutIt out, UnOp op)\n        {\n            for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE)\n                *out = op(*t);\n            return out;\n        }\n\n        template<typename InIt1, typename InIt2, typename OutIt, class BinOp>\n        static __device__ __forceinline__ OutIt transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)\n        {\n            unsigned int lane = laneId();\n\n            InIt1 t1 = beg1 + lane;\n            InIt2 t2 = beg2 + lane;\n            for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, out += STRIDE)\n                *out = op(*t1, *t2);\n            return out;\n        }\n\n        template <class T, class BinOp>\n        static __device__ __forceinline__ T reduce(volatile T *ptr, BinOp op)\n        {\n            const unsigned int lane = laneId();\n\n            if (lane < 16)\n            {\n                T partial = ptr[lane];\n\n                ptr[lane] = partial = op(partial, ptr[lane + 16]);\n                ptr[lane] = partial = op(partial, ptr[lane + 8]);\n                ptr[lane] = partial = op(partial, ptr[lane + 4]);\n                ptr[lane] = partial = op(partial, ptr[lane + 2]);\n                ptr[lane] = partial = op(partial, ptr[lane + 1]);\n            }\n\n            return *ptr;\n        }\n\n        template<typename OutIt, typename T>\n        static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)\n        {\n            unsigned int lane = laneId();\n            value += lane;\n\n            for(OutIt t = beg + lane; t < end; t += STRIDE, value += STRIDE)\n                *t = value;\n        }\n    };\n}}} // namespace cv { namespace cuda { namespace device\n\n//! @endcond\n\n#endif /* __OPENCV_CUDA_DEVICE_WARP_HPP__ */\n"
  },
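  {
    "path": "docs/examples/warp_primitives_sketch.cu",
    "content": "// Illustrative usage sketch, NOT part of the original sources: one 32-thread\n// warp sums 32 values with Warp::reduce from the header above. The launch\n// shape (a single warp) and the Plus functor are assumptions of this sketch.\n#include <opencv2/core/cuda/warp.hpp>\n\nusing namespace cv::cuda::device;\n\nstruct Plus\n{\n    __device__ __forceinline__ int operator ()(int a, int b) const { return a + b; }\n};\n\n__global__ void warp_sum(const int* in, int* out) // launch with exactly 32 threads\n{\n    __shared__ volatile int buf[Warp::WARP_SIZE]; // one slot per lane; reduce reads all 32 entries\n    const unsigned int lane = Warp::laneId();\n    buf[lane] = in[lane];                         // each lane stages one element\n    const int total = Warp::reduce(buf, Plus());  // warp-synchronous tree reduction (16, 8, 4, 2, 1)\n    if (lane == 0)\n        *out = total;\n}\n"
  },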
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/warp_reduce.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_CUDA_WARP_REDUCE_HPP__\n#define OPENCV_CUDA_WARP_REDUCE_HPP__\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template <class T>\n    __device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x)\n    {\n        const unsigned int lane = tid & 31; // index of thread in warp (0..31)\n\n        if (lane < 16)\n        {\n            T partial = ptr[tid];\n\n            ptr[tid] = partial = partial + ptr[tid + 16];\n            ptr[tid] = partial = partial + ptr[tid + 8];\n            ptr[tid] = partial = partial + ptr[tid + 4];\n            ptr[tid] = partial = partial + ptr[tid + 2];\n            ptr[tid] = partial = partial + ptr[tid + 1];\n        }\n\n        return ptr[tid - lane];\n    }\n}}} // namespace cv { namespace cuda { namespace cudev {\n\n//! @endcond\n\n#endif /* OPENCV_CUDA_WARP_REDUCE_HPP__ */\n"
  },
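  {
    "path": "docs/examples/warp_reduce_usage_sketch.cu",
    "content": "// Illustrative usage sketch, NOT part of the original sources: a 64-thread\n// block in which each of the two warps sums its own 32 shared-memory values\n// with warp_reduce() from the header above. Block size and output layout are\n// assumptions of this sketch.\n#include <opencv2/core/cuda/warp_reduce.hpp>\n\nusing namespace cv::cuda::device;\n\n__global__ void per_warp_sums(const float* in, float* out) // out: one sum per warp\n{\n    __shared__ volatile float smem[64];\n    smem[threadIdx.x] = in[blockIdx.x * 64 + threadIdx.x];\n    const float sum = warp_reduce(smem, threadIdx.x); // returns ptr[tid - lane], the warp's total\n    if ((threadIdx.x & 31) == 0)                      // lane 0 of each warp writes the result\n        out[blockIdx.x * 2 + (threadIdx.x >> 5)] = sum;\n}\n"
  },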
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda/warp_shuffle.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CUDA_WARP_SHUFFLE_HPP__\n#define __OPENCV_CUDA_WARP_SHUFFLE_HPP__\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda { namespace device\n{\n    template <typename T>\n    __device__ __forceinline__ T shfl(T val, int srcLane, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        return __shfl(val, srcLane, width);\n    #else\n        return T();\n    #endif\n    }\n    __device__ __forceinline__ unsigned int shfl(unsigned int val, int srcLane, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        return (unsigned int) __shfl((int) val, srcLane, width);\n    #else\n        return 0;\n    #endif\n    }\n    __device__ __forceinline__ double shfl(double val, int srcLane, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        int lo = __double2loint(val);\n        int hi = __double2hiint(val);\n\n        lo = __shfl(lo, srcLane, width);\n        hi = __shfl(hi, srcLane, width);\n\n        return __hiloint2double(hi, lo);\n    #else\n        return 0.0;\n    #endif\n    }\n\n    template <typename T>\n    __device__ __forceinline__ T shfl_down(T val, unsigned int delta, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        return __shfl_down(val, delta, width);\n    #else\n        return T();\n    #endif\n    }\n    __device__ __forceinline__ unsigned int shfl_down(unsigned int val, unsigned int delta, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        return (unsigned int) __shfl_down((int) val, delta, width);\n    #else\n        return 0;\n    #endif\n    }\n    __device__ __forceinline__ double shfl_down(double val, unsigned int delta, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        int lo = __double2loint(val);\n        int hi = __double2hiint(val);\n\n        lo = __shfl_down(lo, delta, width);\n        hi = __shfl_down(hi, delta, width);\n\n        return __hiloint2double(hi, lo);\n    #else\n        return 0.0;\n    #endif\n    }\n\n    template <typename T>\n    __device__ __forceinline__ T shfl_up(T val, unsigned int delta, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        return __shfl_up(val, delta, width);\n    #else\n        return T();\n    #endif\n    }\n    __device__ __forceinline__ unsigned int shfl_up(unsigned int val, unsigned int delta, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        return (unsigned int) __shfl_up((int) val, delta, width);\n    #else\n        return 0;\n    #endif\n    }\n    __device__ __forceinline__ double shfl_up(double val, unsigned int delta, int width = warpSize)\n    {\n    #if __CUDA_ARCH__ >= 300\n        int lo = __double2loint(val);\n        int hi = __double2hiint(val);\n\n        lo = __shfl_up(lo, delta, width);\n        hi = __shfl_up(hi, delta, width);\n\n        return __hiloint2double(hi, lo);\n    #else\n        return 0.0;\n    #endif\n    }\n}}}\n\n//! @endcond\n\n#endif // __OPENCV_CUDA_WARP_SHUFFLE_HPP__\n"
  },
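  {
    "path": "docs/examples/warp_shuffle_usage_sketch.cu",
    "content": "// Illustrative usage sketch, NOT part of the original sources: a register-only\n// warp sum built on the shfl_down() wrapper above. On devices below\n// __CUDA_ARCH__ 300 the wrapper returns T(), so this sketch assumes a\n// Kepler-or-newer GPU. The function name is hypothetical.\n#include <opencv2/core/cuda/warp_shuffle.hpp>\n\nusing namespace cv::cuda::device;\n\n__device__ __forceinline__ float warp_sum(float v)\n{\n    // halve the shuffle distance each step: 16, 8, 4, 2, 1\n    for (int delta = 16; delta > 0; delta >>= 1)\n        v += shfl_down(v, delta);\n    return v; // lane 0 ends up holding the full warp total\n}\n"
  },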
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_CUDA_HPP__\n#define __OPENCV_CORE_CUDA_HPP__\n\n#ifndef __cplusplus\n#  error cuda.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/core/cuda_types.hpp\"\n\n/**\n  @defgroup cuda CUDA-accelerated Computer Vision\n  @{\n    @defgroup cudacore Core part\n    @{\n      @defgroup cudacore_init Initalization and Information\n      @defgroup cudacore_struct Data Structures\n    @}\n  @}\n */\n\nnamespace cv { namespace cuda {\n\n//! @addtogroup cudacore_struct\n//! @{\n\n//===================================================================================\n// GpuMat\n//===================================================================================\n\n/** @brief Base storage class for GPU memory with reference counting.\n\nIts interface matches the Mat interface with the following limitations:\n\n-   no arbitrary dimensions support (only 2D)\n-   no functions that return references to their data (because references on GPU are not valid for\n    CPU)\n-   no expression templates technique support\n\nBeware that the latter limitation may lead to overloaded matrix operators that cause memory\nallocations. 
The GpuMat class is convertible to cuda::PtrStepSz and cuda::PtrStep so it can be\npassed directly to the kernel.\n\n@note In contrast with Mat, in most cases GpuMat::isContinuous() == false. This means that rows are\naligned to a size depending on the hardware. Single-row GpuMat is always a continuous matrix.\n\n@note You are not recommended to leave static or global GpuMat variables allocated, that is, to rely\non their destructors. The destruction order of such variables and the CUDA context is undefined. The GPU memory\nrelease function returns an error if the CUDA context has been destroyed before.\n\n@sa Mat\n */\nclass CV_EXPORTS GpuMat\n{\npublic:\n    class CV_EXPORTS Allocator\n    {\n    public:\n        virtual ~Allocator() {}\n\n        // allocator must fill data, step and refcount fields\n        virtual bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize) = 0;\n        virtual void free(GpuMat* mat) = 0;\n    };\n\n    //! default allocator\n    static Allocator* defaultAllocator();\n    static void setDefaultAllocator(Allocator* allocator);\n\n    //! default constructor\n    explicit GpuMat(Allocator* allocator = defaultAllocator());\n\n    //! constructs GpuMat of the specified size and type\n    GpuMat(int rows, int cols, int type, Allocator* allocator = defaultAllocator());\n    GpuMat(Size size, int type, Allocator* allocator = defaultAllocator());\n\n    //! constructs GpuMat and fills it with the specified value s\n    GpuMat(int rows, int cols, int type, Scalar s, Allocator* allocator = defaultAllocator());\n    GpuMat(Size size, int type, Scalar s, Allocator* allocator = defaultAllocator());\n\n    //! copy constructor\n    GpuMat(const GpuMat& m);\n\n    //! constructor for GpuMat headers pointing to user-allocated data\n    GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);\n    GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);\n\n    //! creates a GpuMat header for a part of the bigger matrix\n    GpuMat(const GpuMat& m, Range rowRange, Range colRange);\n    GpuMat(const GpuMat& m, Rect roi);\n\n    //! builds GpuMat from host memory (Blocking call)\n    explicit GpuMat(InputArray arr, Allocator* allocator = defaultAllocator());\n\n    //! destructor - calls release()\n    ~GpuMat();\n\n    //! assignment operators\n    GpuMat& operator =(const GpuMat& m);\n\n    //! allocates new GpuMat data unless the GpuMat already has specified size and type\n    void create(int rows, int cols, int type);\n    void create(Size size, int type);\n\n    //! decreases the reference counter and deallocates the data when the counter reaches 0\n    void release();\n\n    //! swaps with other smart pointer\n    void swap(GpuMat& mat);\n\n    //! uploads data to GpuMat (Blocking call)\n    void upload(InputArray arr);\n\n    //! uploads data to GpuMat (Non-Blocking call)\n    void upload(InputArray arr, Stream& stream);\n\n    //! downloads data from device to host memory (Blocking call)\n    void download(OutputArray dst) const;\n\n    //! downloads data from device to host memory (Non-Blocking call)\n    void download(OutputArray dst, Stream& stream) const;\n\n    //! returns deep copy of the GpuMat, i.e. the data is copied\n    GpuMat clone() const;\n\n    //! copies the GpuMat content to device memory (Blocking call)\n    void copyTo(OutputArray dst) const;\n\n    //! copies the GpuMat content to device memory (Non-Blocking call)\n    void copyTo(OutputArray dst, Stream& stream) const;\n\n    //! 
copies those GpuMat elements to \"dst\" that are marked with non-zero mask elements (Blocking call)\n    void copyTo(OutputArray dst, InputArray mask) const;\n\n    //! copies those GpuMat elements to \"dst\" that are marked with non-zero mask elements (Non-Blocking call)\n    void copyTo(OutputArray dst, InputArray mask, Stream& stream) const;\n\n    //! sets some of the GpuMat elements to s (Blocking call)\n    GpuMat& setTo(Scalar s);\n\n    //! sets some of the GpuMat elements to s (Non-Blocking call)\n    GpuMat& setTo(Scalar s, Stream& stream);\n\n    //! sets some of the GpuMat elements to s, according to the mask (Blocking call)\n    GpuMat& setTo(Scalar s, InputArray mask);\n\n    //! sets some of the GpuMat elements to s, according to the mask (Non-Blocking call)\n    GpuMat& setTo(Scalar s, InputArray mask, Stream& stream);\n\n    //! converts GpuMat to another datatype (Blocking call)\n    void convertTo(OutputArray dst, int rtype) const;\n\n    //! converts GpuMat to another datatype (Non-Blocking call)\n    void convertTo(OutputArray dst, int rtype, Stream& stream) const;\n\n    //! converts GpuMat to another datatype with scaling (Blocking call)\n    void convertTo(OutputArray dst, int rtype, double alpha, double beta = 0.0) const;\n\n    //! converts GpuMat to another datatype with scaling (Non-Blocking call)\n    void convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const;\n\n    //! converts GpuMat to another datatype with scaling (Non-Blocking call)\n    void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const;\n\n    void assignTo(GpuMat& m, int type=-1) const;\n\n    //! returns pointer to y-th row\n    uchar* ptr(int y = 0);\n    const uchar* ptr(int y = 0) const;\n\n    //! template version of the above method\n    template<typename _Tp> _Tp* ptr(int y = 0);\n    template<typename _Tp> const _Tp* ptr(int y = 0) const;\n\n    template <typename _Tp> operator PtrStepSz<_Tp>() const;\n    template <typename _Tp> operator PtrStep<_Tp>() const;\n\n    //! returns a new GpuMat header for the specified row\n    GpuMat row(int y) const;\n\n    //! returns a new GpuMat header for the specified column\n    GpuMat col(int x) const;\n\n    //! ... for the specified row span\n    GpuMat rowRange(int startrow, int endrow) const;\n    GpuMat rowRange(Range r) const;\n\n    //! ... for the specified column span\n    GpuMat colRange(int startcol, int endcol) const;\n    GpuMat colRange(Range r) const;\n\n    //! extracts a rectangular sub-GpuMat (this is a generalized form of row, rowRange etc.)\n    GpuMat operator ()(Range rowRange, Range colRange) const;\n    GpuMat operator ()(Rect roi) const;\n\n    //! creates alternative GpuMat header for the same data, with different\n    //! number of channels and/or different number of rows\n    GpuMat reshape(int cn, int rows = 0) const;\n\n    //! locates GpuMat header within a parent GpuMat\n    void locateROI(Size& wholeSize, Point& ofs) const;\n\n    //! moves/resizes the current GpuMat ROI inside the parent GpuMat\n    GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);\n\n    //! returns true iff the GpuMat data is continuous\n    //! (i.e. when there are no gaps between successive rows)\n    bool isContinuous() const;\n\n    //! returns element size in bytes\n    size_t elemSize() const;\n\n    //! returns the size of element channel in bytes\n    size_t elemSize1() const;\n\n    //! returns element type\n    int type() const;\n\n    //! 
    //! returns element depth\n    int depth() const;\n\n    //! returns number of channels\n    int channels() const;\n\n    //! returns step/elemSize1()\n    size_t step1() const;\n\n    //! returns GpuMat size : width == number of columns, height == number of rows\n    Size size() const;\n\n    //! returns true if GpuMat data is NULL\n    bool empty() const;\n\n    /*! includes several bit-fields:\n    - the magic signature\n    - continuity flag\n    - depth\n    - number of channels\n    */\n    int flags;\n\n    //! the number of rows and columns\n    int rows, cols;\n\n    //! a distance between successive rows in bytes; includes the gap if any\n    size_t step;\n\n    //! pointer to the data\n    uchar* data;\n\n    //! pointer to the reference counter;\n    //! when GpuMat points to user-allocated data, the pointer is NULL\n    int* refcount;\n\n    //! helper fields used in locateROI and adjustROI\n    uchar* datastart;\n    const uchar* dataend;\n\n    //! allocator\n    Allocator* allocator;\n};\n\n/** @brief Creates a continuous matrix.\n\n@param rows Row count.\n@param cols Column count.\n@param type Type of the matrix.\n@param arr Destination matrix. This parameter changes only if it has a proper type and area (\n\\f$\\texttt{rows} \\times \\texttt{cols}\\f$ ).\n\nA matrix is called continuous if its elements are stored continuously, that is, without gaps at the\nend of each row.\n */\nCV_EXPORTS void createContinuous(int rows, int cols, int type, OutputArray arr);\n\n/** @brief Ensures that the size of a matrix is big enough and the matrix has a proper type.\n\n@param rows Minimum desired number of rows.\n@param cols Minimum desired number of columns.\n@param type Desired matrix type.\n@param arr Destination matrix.\n\nThe function does not reallocate memory if the matrix has proper attributes already.\n */\nCV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr);\n\n//! BufferPool management (must be called before Stream creation)\nCV_EXPORTS void setBufferPoolUsage(bool on);\nCV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);\n\n//===================================================================================\n// HostMem\n//===================================================================================\n\n/** @brief Class with reference counting wrapping special memory type allocation functions from CUDA.\n\nIts interface is also Mat-like but with additional memory type parameters.\n\n-   **PAGE_LOCKED** sets a page locked memory type used commonly for fast and asynchronous\n    uploading/downloading data from/to GPU.\n-   **SHARED** specifies a zero copy memory allocation that enables mapping the host memory to GPU\n    address space, if supported.\n-   **WRITE_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are\n    used to supply GPU with data when GPU only reads it. The advantage is a better CPU cache\n    utilization.\n\n@note Allocation size of such memory types is usually limited. For more details, see the *CUDA 2.2\nPinned Memory APIs* document or the *CUDA C Programming Guide*.\n */\n
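/* Usage sketch (illustrative, not part of the original header): page-locked host memory makes\n   upload/download truly asynchronous.\n\n   cv::cuda::HostMem pinned(480, 640, CV_8UC1);   // PAGE_LOCKED by default\n   cv::Mat header = pinned.createMatHeader();     // Mat view over the pinned buffer, no copy\n   // ... fill header on the CPU ...\n   cv::cuda::Stream stream;\n   cv::cuda::GpuMat d_img;\n   d_img.upload(header, stream);                  // non-blocking thanks to pinned memory\n*/\n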
class CV_EXPORTS HostMem\n{\npublic:\n    enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 };\n\n    static MatAllocator* getAllocator(AllocType alloc_type = PAGE_LOCKED);\n\n    explicit HostMem(AllocType alloc_type = PAGE_LOCKED);\n\n    HostMem(const HostMem& m);\n\n    HostMem(int rows, int cols, int type, AllocType alloc_type = PAGE_LOCKED);\n    HostMem(Size size, int type, AllocType alloc_type = PAGE_LOCKED);\n\n    //! creates from host memory, copying the data\n    explicit HostMem(InputArray arr, AllocType alloc_type = PAGE_LOCKED);\n\n    ~HostMem();\n\n    HostMem& operator =(const HostMem& m);\n\n    //! swaps with other smart pointer\n    void swap(HostMem& b);\n\n    //! returns deep copy of the matrix, i.e. the data is copied\n    HostMem clone() const;\n\n    //! allocates new matrix data unless the matrix already has specified size and type.\n    void create(int rows, int cols, int type);\n    void create(Size size, int type);\n\n    //! creates alternative HostMem header for the same data, with different\n    //! number of channels and/or different number of rows\n    HostMem reshape(int cn, int rows = 0) const;\n\n    //! decrements the reference counter and releases the memory if needed.\n    void release();\n\n    //! returns matrix header with disabled reference counting for HostMem data.\n    Mat createMatHeader() const;\n\n    /** @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting\n    for it.\n\n    This can be done only if memory was allocated with the SHARED flag and if it is supported by the\n    hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which\n    eliminates an extra copy.\n     */\n    GpuMat createGpuMatHeader() const;\n\n    // Please see cv::Mat for descriptions\n    bool isContinuous() const;\n    size_t elemSize() const;\n    size_t elemSize1() const;\n    int type() const;\n    int depth() const;\n    int channels() const;\n    size_t step1() const;\n    Size size() const;\n    bool empty() const;\n\n    // Please see cv::Mat for descriptions\n    int flags;\n    int rows, cols;\n    size_t step;\n\n    uchar* data;\n    int* refcount;\n\n    uchar* datastart;\n    const uchar* dataend;\n\n    AllocType alloc_type;\n};\n\n/** @brief Page-locks the memory of matrix and maps it for the device(s).\n\n@param m Input matrix.\n */\nCV_EXPORTS void registerPageLocked(Mat& m);\n\n/** @brief Unmaps the memory of matrix and makes it pageable again.\n\n@param m Input matrix.\n */\nCV_EXPORTS void unregisterPageLocked(Mat& m);\n\n//===================================================================================\n// Stream\n//===================================================================================\n\n/** @brief This class encapsulates a queue of asynchronous calls.\n\n@note Currently, you may face problems if an operation is enqueued twice with different data. Some\nfunctions use the constant GPU memory, and the next call may update the memory before the previous one\nhas finished. But calling different operations asynchronously is safe because each operation\nhas its own constant buffer. Memory copy/upload/download/set operations to the buffers you hold are\nalso safe.\n */\n
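/* Usage sketch (illustrative, not part of the original header): overlap GPU work with CPU work.\n\n   cv::cuda::GpuMat d_src(480, 640, CV_8UC1), d_dst;\n   cv::cuda::Stream stream;\n   d_src.convertTo(d_dst, CV_16S, stream);   // enqueued on the stream; returns immediately\n   doCpuWork();                              // hypothetical CPU-side function, runs concurrently\n   stream.waitForCompletion();               // block until d_dst is ready\n*/\n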
class CV_EXPORTS Stream\n{\n    typedef void (Stream::*bool_type)() const;\n    void this_type_does_not_support_comparisons() const {}\n\npublic:\n    typedef void (*StreamCallback)(int status, void* userData);\n\n    //! creates a new asynchronous stream\n    Stream();\n\n    /** @brief Returns true if the current stream queue is finished. Otherwise, it returns false.\n    */\n    bool queryIfComplete() const;\n\n    /** @brief Blocks the current CPU thread until all operations in the stream are complete.\n    */\n    void waitForCompletion();\n\n    /** @brief Makes a compute stream wait on an event.\n    */\n    void waitEvent(const Event& event);\n\n    /** @brief Adds a callback to be called on the host after all currently enqueued items in the stream have\n    completed.\n\n    @note Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization\n    that may depend on outstanding device work or other callbacks that are not mandated to run earlier.\n    Callbacks without a mandated order (in independent streams) execute in undefined order and may be\n    serialized.\n     */\n    void enqueueHostCallback(StreamCallback callback, void* userData);\n\n    //! returns Stream object for the default CUDA stream\n    static Stream& Null();\n\n    //! returns true if stream object is not default (!= 0)\n    operator bool_type() const;\n\n    class Impl;\n\nprivate:\n    Ptr<Impl> impl_;\n    Stream(const Ptr<Impl>& impl);\n\n    friend struct StreamAccessor;\n    friend class BufferPool;\n    friend class DefaultDeviceInitializer;\n};\n\nclass CV_EXPORTS Event\n{\npublic:\n    enum CreateFlags\n    {\n        DEFAULT        = 0x00,  /**< Default event flag */\n        BLOCKING_SYNC  = 0x01,  /**< Event uses blocking synchronization */\n        DISABLE_TIMING = 0x02,  /**< Event will not record timing data */\n        INTERPROCESS   = 0x04   /**< Event is suitable for interprocess use. DISABLE_TIMING must be set */\n    };\n\n    explicit Event(CreateFlags flags = DEFAULT);\n\n    //! records an event\n    void record(Stream& stream = Stream::Null());\n\n    //! queries an event's status\n    bool queryIfComplete() const;\n\n    //! waits for an event to complete\n    void waitForCompletion();\n\n    //! computes the elapsed time between events\n    static float elapsedTime(const Event& start, const Event& end);\n\n    class Impl;\n\nprivate:\n    Ptr<Impl> impl_;\n    Event(const Ptr<Impl>& impl);\n\n    friend struct EventAccessor;\n};\n\n//! @} cudacore_struct\n\n//===================================================================================\n// Initialization & Info\n//===================================================================================\n\n//! @addtogroup cudacore_init\n//! @{\n\n/** @brief Returns the number of installed CUDA-enabled devices.\n\nUse this function before any other CUDA function calls. If OpenCV is compiled without CUDA support,\nthis function returns 0.\n */\nCV_EXPORTS int getCudaEnabledDeviceCount();\n\n
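/* Usage sketch (illustrative, not part of the original header): probe for devices before taking\n   a GPU code path.\n\n   if (cv::cuda::getCudaEnabledDeviceCount() > 0)\n       cv::cuda::setDevice(0);   // setDevice is declared just below\n*/\n\n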
/** @brief Sets a device and initializes it for the current thread.\n\n@param device System index of a CUDA device starting with 0.\n\nIf the call of this function is omitted, a default device is initialized at the first CUDA usage.\n */\nCV_EXPORTS void setDevice(int device);\n\n/** @brief Returns the current device index set by cuda::setDevice or initialized by default.\n */\nCV_EXPORTS int getDevice();\n\n/** @brief Explicitly destroys and cleans up all resources associated with the current device in the current\nprocess.\n\nAny subsequent API call to this device will reinitialize the device.\n */\nCV_EXPORTS void resetDevice();\n\n/** @brief Enumeration providing CUDA computing features.\n */\nenum FeatureSet\n{\n    FEATURE_SET_COMPUTE_10 = 10,\n    FEATURE_SET_COMPUTE_11 = 11,\n    FEATURE_SET_COMPUTE_12 = 12,\n    FEATURE_SET_COMPUTE_13 = 13,\n    FEATURE_SET_COMPUTE_20 = 20,\n    FEATURE_SET_COMPUTE_21 = 21,\n    FEATURE_SET_COMPUTE_30 = 30,\n    FEATURE_SET_COMPUTE_32 = 32,\n    FEATURE_SET_COMPUTE_35 = 35,\n    FEATURE_SET_COMPUTE_50 = 50,\n\n    GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,\n    SHARED_ATOMICS = FEATURE_SET_COMPUTE_12,\n    NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13,\n    WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30,\n    DYNAMIC_PARALLELISM = FEATURE_SET_COMPUTE_35\n};\n\n//! checks whether the current device supports the given feature\nCV_EXPORTS bool deviceSupports(FeatureSet feature_set);\n\n/** @brief Class providing a set of static methods to check what NVIDIA\\* card architecture the CUDA module was\nbuilt for.\n\nAccording to the CUDA C Programming Guide Version 3.2: \"PTX code produced for some specific compute\ncapability can always be compiled to binary code of greater or equal compute capability\".\n */\nclass CV_EXPORTS TargetArchs\n{\npublic:\n    /** @brief The following method checks whether the module was built with the support of the given feature:\n\n    @param feature_set Features to be checked. See cuda::FeatureSet.\n     */\n    static bool builtWith(FeatureSet feature_set);\n\n    /** @brief There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA\n    code for the given architecture(s):\n\n    @param major Major compute capability version.\n    @param minor Minor compute capability version.\n     */\n    static bool has(int major, int minor);\n    static bool hasPtx(int major, int minor);\n    static bool hasBin(int major, int minor);\n\n    static bool hasEqualOrLessPtx(int major, int minor);\n    static bool hasEqualOrGreater(int major, int minor);\n    static bool hasEqualOrGreaterPtx(int major, int minor);\n    static bool hasEqualOrGreaterBin(int major, int minor);\n};\n\n/** @brief Class providing functionality for querying the specified GPU properties.\n */\nclass CV_EXPORTS DeviceInfo\n{\npublic:\n    //! creates DeviceInfo object for the current GPU\n    DeviceInfo();\n\n    /** @brief The constructors.\n\n    @param device_id System index of the CUDA device starting with 0.\n\n    Constructs the DeviceInfo object for the specified device. If the device_id parameter is omitted, it\n    constructs an object for the current device.\n     */\n    DeviceInfo(int device_id);\n\n    /** @brief Returns system index of the CUDA device starting with 0.\n    */\n    int deviceID() const;\n\n    //! ASCII string identifying device\n    const char* name() const;\n\n
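    /* Usage sketch (illustrative, not part of the original header; needs <iostream>):\n\n       cv::cuda::DeviceInfo info;   // describes the device selected for this thread\n       std::cout << info.name() << \", compute capability \"\n                 << info.majorVersion() << '.' << info.minorVersion() << std::endl;\n    */\n\n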
    //! global memory available on device in bytes\n    size_t totalGlobalMem() const;\n\n    //! shared memory available per block in bytes\n    size_t sharedMemPerBlock() const;\n\n    //! 32-bit registers available per block\n    int regsPerBlock() const;\n\n    //! warp size in threads\n    int warpSize() const;\n\n    //! maximum pitch in bytes allowed by memory copies\n    size_t memPitch() const;\n\n    //! maximum number of threads per block\n    int maxThreadsPerBlock() const;\n\n    //! maximum size of each dimension of a block\n    Vec3i maxThreadsDim() const;\n\n    //! maximum size of each dimension of a grid\n    Vec3i maxGridSize() const;\n\n    //! clock frequency in kilohertz\n    int clockRate() const;\n\n    //! constant memory available on device in bytes\n    size_t totalConstMem() const;\n\n    //! major compute capability\n    int majorVersion() const;\n\n    //! minor compute capability\n    int minorVersion() const;\n\n    //! alignment requirement for textures\n    size_t textureAlignment() const;\n\n    //! pitch alignment requirement for texture references bound to pitched memory\n    size_t texturePitchAlignment() const;\n\n    //! number of multiprocessors on device\n    int multiProcessorCount() const;\n\n    //! specifies whether there is a run-time limit on kernels\n    bool kernelExecTimeoutEnabled() const;\n\n    //! device is integrated as opposed to discrete\n    bool integrated() const;\n\n    //! device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer\n    bool canMapHostMemory() const;\n\n    enum ComputeMode\n    {\n        ComputeModeDefault,         /**< default compute mode (Multiple threads can use cudaSetDevice with this device) */\n        ComputeModeExclusive,       /**< compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice with this device) */\n        ComputeModeProhibited,      /**< compute-prohibited mode (No threads can use cudaSetDevice with this device) */\n        ComputeModeExclusiveProcess /**< compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice with this device) */\n    };\n\n    //! compute mode\n    ComputeMode computeMode() const;\n\n    //! maximum 1D texture size\n    int maxTexture1D() const;\n\n    //! maximum 1D mipmapped texture size\n    int maxTexture1DMipmap() const;\n\n    //! maximum size for 1D textures bound to linear memory\n    int maxTexture1DLinear() const;\n\n    //! maximum 2D texture dimensions\n    Vec2i maxTexture2D() const;\n\n    //! maximum 2D mipmapped texture dimensions\n    Vec2i maxTexture2DMipmap() const;\n\n    //! maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory\n    Vec3i maxTexture2DLinear() const;\n\n    //! maximum 2D texture dimensions if texture gather operations have to be performed\n    Vec2i maxTexture2DGather() const;\n\n    //! maximum 3D texture dimensions\n    Vec3i maxTexture3D() const;\n\n    //! maximum Cubemap texture dimensions\n    int maxTextureCubemap() const;\n\n    //! maximum 1D layered texture dimensions\n    Vec2i maxTexture1DLayered() const;\n\n    //! maximum 2D layered texture dimensions\n    Vec3i maxTexture2DLayered() const;\n\n    //! maximum Cubemap layered texture dimensions\n    Vec2i maxTextureCubemapLayered() const;\n\n    //! maximum 1D surface size\n    int maxSurface1D() const;\n\n    //! maximum 2D surface dimensions\n    Vec2i maxSurface2D() const;\n\n    //! maximum 3D surface dimensions\n    Vec3i maxSurface3D() const;\n\n    //! 
maximum 1D layered surface dimensions\n    Vec2i maxSurface1DLayered() const;\n\n    //! maximum 2D layered surface dimensions\n    Vec3i maxSurface2DLayered() const;\n\n    //! maximum Cubemap surface dimensions\n    int maxSurfaceCubemap() const;\n\n    //! maximum Cubemap layered surface dimensions\n    Vec2i maxSurfaceCubemapLayered() const;\n\n    //! alignment requirements for surfaces\n    size_t surfaceAlignment() const;\n\n    //! device can possibly execute multiple kernels concurrently\n    bool concurrentKernels() const;\n\n    //! device has ECC support enabled\n    bool ECCEnabled() const;\n\n    //! PCI bus ID of the device\n    int pciBusID() const;\n\n    //! PCI device ID of the device\n    int pciDeviceID() const;\n\n    //! PCI domain ID of the device\n    int pciDomainID() const;\n\n    //! true if device is a Tesla device using TCC driver, false otherwise\n    bool tccDriver() const;\n\n    //! number of asynchronous engines\n    int asyncEngineCount() const;\n\n    //! device shares a unified address space with the host\n    bool unifiedAddressing() const;\n\n    //! peak memory clock frequency in kilohertz\n    int memoryClockRate() const;\n\n    //! global memory bus width in bits\n    int memoryBusWidth() const;\n\n    //! size of L2 cache in bytes\n    int l2CacheSize() const;\n\n    //! maximum resident threads per multiprocessor\n    int maxThreadsPerMultiProcessor() const;\n\n    //! gets free and total device memory\n    void queryMemory(size_t& totalMemory, size_t& freeMemory) const;\n    size_t freeMemory() const;\n    size_t totalMemory() const;\n\n    /** @brief Provides information on CUDA feature support.\n\n    @param feature_set Features to be checked. See cuda::FeatureSet.\n\n    This function returns true if the device has the specified CUDA feature. Otherwise, it returns false.\n     */\n    bool supports(FeatureSet feature_set) const;\n\n    /** @brief Checks the CUDA module and device compatibility.\n\n    This function returns true if the CUDA module can be run on the specified device. Otherwise, it\n    returns false.\n     */\n    bool isCompatible() const;\n\nprivate:\n    int device_id_;\n};\n\nCV_EXPORTS void printCudaDeviceInfo(int device);\nCV_EXPORTS void printShortCudaDeviceInfo(int device);\n\n//! @} cudacore_init\n\n}} // namespace cv { namespace cuda {\n\n\n#include \"opencv2/core/cuda.inl.hpp\"\n\n#endif /* __OPENCV_CORE_CUDA_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda.inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_CUDAINL_HPP__\n#define __OPENCV_CORE_CUDAINL_HPP__\n\n#include \"opencv2/core/cuda.hpp\"\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda {\n\n//===================================================================================\n// GpuMat\n//===================================================================================\n\ninline\nGpuMat::GpuMat(Allocator* allocator_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)\n{}\n\ninline\nGpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)\n{\n    if (rows_ > 0 && cols_ > 0)\n        create(rows_, cols_, type_);\n}\n\ninline\nGpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)\n{\n    if (size_.height > 0 && size_.width > 0)\n        create(size_.height, size_.width, type_);\n}\n\ninline\nGpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)\n{\n    if (rows_ > 0 && cols_ > 0)\n    {\n        create(rows_, cols_, type_);\n        setTo(s_);\n    }\n}\n\ninline\nGpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)\n{\n    if (size_.height > 0 && size_.width > 0)\n    {\n        create(size_.height, size_.width, type_);\n        setTo(s_);\n    }\n}\n\ninline\nGpuMat::GpuMat(const GpuMat& m)\n    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)\n{\n    if (refcount)\n        CV_XADD(refcount, 1);\n}\n\ninline\nGpuMat::GpuMat(InputArray arr, Allocator* allocator_) :\n    flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)\n{\n    upload(arr);\n}\n\ninline\nGpuMat::~GpuMat()\n{\n    release();\n}\n\ninline\nGpuMat& GpuMat::operator =(const GpuMat& m)\n{\n    if (this != &m)\n    {\n        GpuMat temp(m);\n        swap(temp);\n    }\n\n    return *this;\n}\n\ninline\nvoid GpuMat::create(Size size_, int type_)\n{\n    create(size_.height, size_.width, type_);\n}\n\ninline\nvoid GpuMat::swap(GpuMat& b)\n{\n    std::swap(flags, b.flags);\n    std::swap(rows, b.rows);\n    std::swap(cols, b.cols);\n    std::swap(step, b.step);\n    std::swap(data, b.data);\n    std::swap(datastart, b.datastart);\n    std::swap(dataend, b.dataend);\n    std::swap(refcount, b.refcount);\n    std::swap(allocator, b.allocator);\n}\n\ninline\nGpuMat GpuMat::clone() const\n{\n    GpuMat m;\n    copyTo(m);\n    return m;\n}\n\ninline\nvoid GpuMat::copyTo(OutputArray dst, InputArray mask) const\n{\n    copyTo(dst, mask, Stream::Null());\n}\n\ninline\nGpuMat& GpuMat::setTo(Scalar s)\n{\n    return setTo(s, Stream::Null());\n}\n\ninline\nGpuMat& GpuMat::setTo(Scalar s, InputArray mask)\n{\n    return setTo(s, mask, Stream::Null());\n}\n\ninline\nvoid GpuMat::convertTo(OutputArray dst, int rtype) const\n{\n    convertTo(dst, rtype, Stream::Null());\n}\n\ninline\nvoid GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const\n{\n    convertTo(dst, rtype, alpha, beta, Stream::Null());\n}\n\ninline\nvoid GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const\n{\n    
convertTo(dst, rtype, alpha, 0.0, stream);\n}\n\ninline\nvoid GpuMat::assignTo(GpuMat& m, int _type) const\n{\n    if (_type < 0)\n        m = *this;\n    else\n        convertTo(m, _type);\n}\n\ninline\nuchar* GpuMat::ptr(int y)\n{\n    CV_DbgAssert( (unsigned)y < (unsigned)rows );\n    return data + step * y;\n}\n\ninline\nconst uchar* GpuMat::ptr(int y) const\n{\n    CV_DbgAssert( (unsigned)y < (unsigned)rows );\n    return data + step * y;\n}\n\ntemplate<typename _Tp> inline\n_Tp* GpuMat::ptr(int y)\n{\n    return (_Tp*)ptr(y);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* GpuMat::ptr(int y) const\n{\n    return (const _Tp*)ptr(y);\n}\n\ntemplate <class T> inline\nGpuMat::operator PtrStepSz<T>() const\n{\n    return PtrStepSz<T>(rows, cols, (T*)data, step);\n}\n\ntemplate <class T> inline\nGpuMat::operator PtrStep<T>() const\n{\n    return PtrStep<T>((T*)data, step);\n}\n\ninline\nGpuMat GpuMat::row(int y) const\n{\n    return GpuMat(*this, Range(y, y+1), Range::all());\n}\n\ninline\nGpuMat GpuMat::col(int x) const\n{\n    return GpuMat(*this, Range::all(), Range(x, x+1));\n}\n\ninline\nGpuMat GpuMat::rowRange(int startrow, int endrow) const\n{\n    return GpuMat(*this, Range(startrow, endrow), Range::all());\n}\n\ninline\nGpuMat GpuMat::rowRange(Range r) const\n{\n    return GpuMat(*this, r, Range::all());\n}\n\ninline\nGpuMat GpuMat::colRange(int startcol, int endcol) const\n{\n    return GpuMat(*this, Range::all(), Range(startcol, endcol));\n}\n\ninline\nGpuMat GpuMat::colRange(Range r) const\n{\n    return GpuMat(*this, Range::all(), r);\n}\n\ninline\nGpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const\n{\n    return GpuMat(*this, rowRange_, colRange_);\n}\n\ninline\nGpuMat GpuMat::operator ()(Rect roi) const\n{\n    return GpuMat(*this, roi);\n}\n\ninline\nbool GpuMat::isContinuous() const\n{\n    return (flags & Mat::CONTINUOUS_FLAG) != 0;\n}\n\ninline\nsize_t GpuMat::elemSize() const\n{\n    return CV_ELEM_SIZE(flags);\n}\n\ninline\nsize_t GpuMat::elemSize1() const\n{\n    return CV_ELEM_SIZE1(flags);\n}\n\ninline\nint GpuMat::type() const\n{\n    return CV_MAT_TYPE(flags);\n}\n\ninline\nint GpuMat::depth() const\n{\n    return CV_MAT_DEPTH(flags);\n}\n\ninline\nint GpuMat::channels() const\n{\n    return CV_MAT_CN(flags);\n}\n\ninline\nsize_t GpuMat::step1() const\n{\n    return step / elemSize1();\n}\n\ninline\nSize GpuMat::size() const\n{\n    return Size(cols, rows);\n}\n\ninline\nbool GpuMat::empty() const\n{\n    return data == 0;\n}\n\nstatic inline\nGpuMat createContinuous(int rows, int cols, int type)\n{\n    GpuMat m;\n    createContinuous(rows, cols, type, m);\n    return m;\n}\n\nstatic inline\nvoid createContinuous(Size size, int type, OutputArray arr)\n{\n    createContinuous(size.height, size.width, type, arr);\n}\n\nstatic inline\nGpuMat createContinuous(Size size, int type)\n{\n    GpuMat m;\n    createContinuous(size, type, m);\n    return m;\n}\n\nstatic inline\nvoid ensureSizeIsEnough(Size size, int type, OutputArray arr)\n{\n    ensureSizeIsEnough(size.height, size.width, type, arr);\n}\n\nstatic inline\nvoid swap(GpuMat& a, GpuMat& b)\n{\n    a.swap(b);\n}\n\n//===================================================================================\n// HostMem\n//===================================================================================\n\ninline\nHostMem::HostMem(AllocType alloc_type_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), 
alloc_type(alloc_type_)\n{\n}\n\ninline\nHostMem::HostMem(const HostMem& m)\n    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)\n{\n    if( refcount )\n        CV_XADD(refcount, 1);\n}\n\ninline\nHostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)\n{\n    if (rows_ > 0 && cols_ > 0)\n        create(rows_, cols_, type_);\n}\n\ninline\nHostMem::HostMem(Size size_, int type_, AllocType alloc_type_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)\n{\n    if (size_.height > 0 && size_.width > 0)\n        create(size_.height, size_.width, type_);\n}\n\ninline\nHostMem::HostMem(InputArray arr, AllocType alloc_type_)\n    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)\n{\n    arr.getMat().copyTo(*this);\n}\n\ninline\nHostMem::~HostMem()\n{\n    release();\n}\n\ninline\nHostMem& HostMem::operator =(const HostMem& m)\n{\n    if (this != &m)\n    {\n        HostMem temp(m);\n        swap(temp);\n    }\n\n    return *this;\n}\n\ninline\nvoid HostMem::swap(HostMem& b)\n{\n    std::swap(flags, b.flags);\n    std::swap(rows, b.rows);\n    std::swap(cols, b.cols);\n    std::swap(step, b.step);\n    std::swap(data, b.data);\n    std::swap(datastart, b.datastart);\n    std::swap(dataend, b.dataend);\n    std::swap(refcount, b.refcount);\n    std::swap(alloc_type, b.alloc_type);\n}\n\ninline\nHostMem HostMem::clone() const\n{\n    HostMem m(size(), type(), alloc_type);\n    createMatHeader().copyTo(m);\n    return m;\n}\n\ninline\nvoid HostMem::create(Size size_, int type_)\n{\n    create(size_.height, size_.width, type_);\n}\n\ninline\nMat HostMem::createMatHeader() const\n{\n    return Mat(size(), type(), data, step);\n}\n\ninline\nbool HostMem::isContinuous() const\n{\n    return (flags & Mat::CONTINUOUS_FLAG) != 0;\n}\n\ninline\nsize_t HostMem::elemSize() const\n{\n    return CV_ELEM_SIZE(flags);\n}\n\ninline\nsize_t HostMem::elemSize1() const\n{\n    return CV_ELEM_SIZE1(flags);\n}\n\ninline\nint HostMem::type() const\n{\n    return CV_MAT_TYPE(flags);\n}\n\ninline\nint HostMem::depth() const\n{\n    return CV_MAT_DEPTH(flags);\n}\n\ninline\nint HostMem::channels() const\n{\n    return CV_MAT_CN(flags);\n}\n\ninline\nsize_t HostMem::step1() const\n{\n    return step / elemSize1();\n}\n\ninline\nSize HostMem::size() const\n{\n    return Size(cols, rows);\n}\n\ninline\nbool HostMem::empty() const\n{\n    return data == 0;\n}\n\nstatic inline\nvoid swap(HostMem& a, HostMem& b)\n{\n    a.swap(b);\n}\n\n//===================================================================================\n// Stream\n//===================================================================================\n\ninline\nStream::Stream(const Ptr<Impl>& impl)\n    : impl_(impl)\n{\n}\n\n//===================================================================================\n// Event\n//===================================================================================\n\ninline\nEvent::Event(const Ptr<Impl>& impl)\n    : impl_(impl)\n{\n}\n\n//===================================================================================\n// Initialization & Info\n//===================================================================================\n\ninline\nbool 
TargetArchs::has(int major, int minor)\n{\n    return hasPtx(major, minor) || hasBin(major, minor);\n}\n\ninline\nbool TargetArchs::hasEqualOrGreater(int major, int minor)\n{\n    return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);\n}\n\ninline\nDeviceInfo::DeviceInfo()\n{\n    device_id_ = getDevice();\n}\n\ninline\nDeviceInfo::DeviceInfo(int device_id)\n{\n    CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );\n    device_id_ = device_id;\n}\n\ninline\nint DeviceInfo::deviceID() const\n{\n    return device_id_;\n}\n\ninline\nsize_t DeviceInfo::freeMemory() const\n{\n    size_t _totalMemory = 0, _freeMemory = 0;\n    queryMemory(_totalMemory, _freeMemory);\n    return _freeMemory;\n}\n\ninline\nsize_t DeviceInfo::totalMemory() const\n{\n    size_t _totalMemory = 0, _freeMemory = 0;\n    queryMemory(_totalMemory, _freeMemory);\n    return _totalMemory;\n}\n\ninline\nbool DeviceInfo::supports(FeatureSet feature_set) const\n{\n    int version = majorVersion() * 10 + minorVersion();\n    return version >= feature_set;\n}\n\n\n}} // namespace cv { namespace cuda {\n\n//===================================================================================\n// Mat\n//===================================================================================\n\nnamespace cv {\n\ninline\nMat::Mat(const cuda::GpuMat& m)\n    : flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)\n{\n    m.download(*this);\n}\n\n}\n\n//! @endcond\n\n#endif // __OPENCV_CORE_CUDAINL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda_stream_accessor.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__\n#define __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__\n\n#ifndef __cplusplus\n#  error cuda_stream_accessor.hpp header must be compiled as C++\n#endif\n\n/** @file cuda_stream_accessor.hpp\n * This is only header file that depends on CUDA Runtime API. All other headers are independent.\n */\n\n#include <cuda_runtime.h>\n#include \"opencv2/core/cuda.hpp\"\n\nnamespace cv\n{\n    namespace cuda\n    {\n\n//! @addtogroup cudacore_struct\n//! @{\n\n        /** @brief Class that enables getting cudaStream_t from cuda::Stream\n         */\n        struct StreamAccessor\n        {\n            CV_EXPORTS static cudaStream_t getStream(const Stream& stream);\n            CV_EXPORTS static Stream wrapStream(cudaStream_t stream);\n        };\n\n        /** @brief Class that enables getting cudaEvent_t from cuda::Event\n         */\n        struct EventAccessor\n        {\n            CV_EXPORTS static cudaEvent_t getEvent(const Event& event);\n            CV_EXPORTS static Event wrapEvent(cudaEvent_t event);\n        };\n\n//! @}\n\n    }\n}\n\n#endif /* __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cuda_types.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_CUDA_TYPES_HPP__\n#define __OPENCV_CORE_CUDA_TYPES_HPP__\n\n#ifndef __cplusplus\n#  error cuda_types.hpp header must be compiled as C++\n#endif\n\n/** @file\n * @deprecated Use @ref cudev instead.\n */\n\n//! @cond IGNORED\n\n#ifdef __CUDACC__\n    #define __CV_CUDA_HOST_DEVICE__ __host__ __device__ __forceinline__\n#else\n    #define __CV_CUDA_HOST_DEVICE__\n#endif\n\nnamespace cv\n{\n    namespace cuda\n    {\n\n        // Simple lightweight structures that encapsulates information about an image on device.\n        // It is intended to pass to nvcc-compiled code. 
        template <typename T> struct DevPtr\n        {\n            typedef T elem_type;\n            typedef int index_type;\n\n            enum { elem_size = sizeof(elem_type) };\n\n            T* data;\n\n            __CV_CUDA_HOST_DEVICE__ DevPtr() : data(0) {}\n            __CV_CUDA_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}\n\n            __CV_CUDA_HOST_DEVICE__ size_t elemSize() const { return elem_size; }\n            __CV_CUDA_HOST_DEVICE__ operator       T*()       { return data; }\n            __CV_CUDA_HOST_DEVICE__ operator const T*() const { return data; }\n        };\n\n        template <typename T> struct PtrSz : public DevPtr<T>\n        {\n            __CV_CUDA_HOST_DEVICE__ PtrSz() : size(0) {}\n            __CV_CUDA_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}\n\n            size_t size;\n        };\n\n        template <typename T> struct PtrStep : public DevPtr<T>\n        {\n            __CV_CUDA_HOST_DEVICE__ PtrStep() : step(0) {}\n            __CV_CUDA_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}\n\n            size_t step;\n\n            __CV_CUDA_HOST_DEVICE__       T* ptr(int y = 0)       { return (      T*)( (      char*)DevPtr<T>::data + y * step); }\n            __CV_CUDA_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }\n\n            __CV_CUDA_HOST_DEVICE__       T& operator ()(int y, int x)       { return ptr(y)[x]; }\n            __CV_CUDA_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }\n        };\n\n        template <typename T> struct PtrStepSz : public PtrStep<T>\n        {\n            __CV_CUDA_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}\n            __CV_CUDA_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)\n                : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}\n\n            template <typename U>\n            explicit PtrStepSz(const PtrStepSz<U>& d) : PtrStep<T>((T*)d.data, d.step), cols(d.cols), rows(d.rows){}\n\n            int cols;\n            int rows;\n        };\n\n        typedef PtrStepSz<unsigned char> PtrStepSzb;\n        typedef PtrStepSz<float> PtrStepSzf;\n        typedef PtrStepSz<int> PtrStepSzi;\n\n        typedef PtrStep<unsigned char> PtrStepb;\n        typedef PtrStep<float> PtrStepf;\n        typedef PtrStep<int> PtrStepi;\n\n    }\n}\n\n//! @endcond\n\n#endif /* __OPENCV_CORE_CUDA_TYPES_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cvdef.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_CVDEF_H__\n#define __OPENCV_CORE_CVDEF_H__\n\n//! @addtogroup core_utils\n//! 
@{\n\n#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER && _MSC_VER > 1300\n#  define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio warnings */\n#endif\n\n// undef problematic defines sometimes defined by system headers (windows.h in particular)\n#undef small\n#undef min\n#undef max\n#undef abs\n#undef Complex\n\n#include <limits.h>\n#include \"opencv2/core/hal/interface.h\"\n\n#if defined __ICL\n#  define CV_ICC   __ICL\n#elif defined __ICC\n#  define CV_ICC   __ICC\n#elif defined __ECL\n#  define CV_ICC   __ECL\n#elif defined __ECC\n#  define CV_ICC   __ECC\n#elif defined __INTEL_COMPILER\n#  define CV_ICC   __INTEL_COMPILER\n#endif\n\n#ifndef CV_INLINE\n#  if defined __cplusplus\n#    define CV_INLINE static inline\n#  elif defined _MSC_VER\n#    define CV_INLINE __inline\n#  else\n#    define CV_INLINE static\n#  endif\n#endif\n\n#if defined CV_ICC && !defined CV_ENABLE_UNROLLED\n#  define CV_ENABLE_UNROLLED 0\n#else\n#  define CV_ENABLE_UNROLLED 1\n#endif\n\n#ifdef __GNUC__\n#  define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))\n#elif defined _MSC_VER\n#  define CV_DECL_ALIGNED(x) __declspec(align(x))\n#else\n#  define CV_DECL_ALIGNED(x)\n#endif\n\n/* CPU features and intrinsics support */\n#define CV_CPU_NONE             0\n#define CV_CPU_MMX              1\n#define CV_CPU_SSE              2\n#define CV_CPU_SSE2             3\n#define CV_CPU_SSE3             4\n#define CV_CPU_SSSE3            5\n#define CV_CPU_SSE4_1           6\n#define CV_CPU_SSE4_2           7\n#define CV_CPU_POPCNT           8\n\n#define CV_CPU_AVX              10\n#define CV_CPU_AVX2             11\n#define CV_CPU_FMA3             12\n\n#define CV_CPU_AVX_512F         13\n#define CV_CPU_AVX_512BW        14\n#define CV_CPU_AVX_512CD        15\n#define CV_CPU_AVX_512DQ        16\n#define CV_CPU_AVX_512ER        17\n#define CV_CPU_AVX_512IFMA512   18\n#define CV_CPU_AVX_512PF        19\n#define CV_CPU_AVX_512VBMI      20\n#define CV_CPU_AVX_512VL        21\n\n#define CV_CPU_NEON   100\n\n// when adding to this list remember to update the following enum\n#define CV_HARDWARE_MAX_FEATURE 255\n\n/** @brief Available CPU features.\n*/\nenum CpuFeatures {\n    CPU_MMX             = 1,\n    CPU_SSE             = 2,\n    CPU_SSE2            = 3,\n    CPU_SSE3            = 4,\n    CPU_SSSE3           = 5,\n    CPU_SSE4_1          = 6,\n    CPU_SSE4_2          = 7,\n    CPU_POPCNT          = 8,\n\n    CPU_AVX             = 10,\n    CPU_AVX2            = 11,\n    CPU_FMA3            = 12,\n\n    CPU_AVX_512F        = 13,\n    CPU_AVX_512BW       = 14,\n    CPU_AVX_512CD       = 15,\n    CPU_AVX_512DQ       = 16,\n    CPU_AVX_512ER       = 17,\n    CPU_AVX_512IFMA512  = 18,\n    CPU_AVX_512PF       = 19,\n    CPU_AVX_512VBMI     = 20,\n    CPU_AVX_512VL       = 21,\n\n    CPU_NEON            = 100\n};\n\n// do not include SSE/AVX/NEON headers for NVCC compiler\n#ifndef __CUDACC__\n\n#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)\n#  include <emmintrin.h>\n#  define CV_MMX 1\n#  define CV_SSE 1\n#  define CV_SSE2 1\n#  if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)\n#    include <pmmintrin.h>\n#    define CV_SSE3 1\n#  endif\n#  if defined __SSSE3__  || (defined _MSC_VER && _MSC_VER >= 1500)\n#    include <tmmintrin.h>\n#    define CV_SSSE3 1\n#  endif\n#  if defined __SSE4_1__ || 
(defined _MSC_VER && _MSC_VER >= 1500)\n#    include <smmintrin.h>\n#    define CV_SSE4_1 1\n#  endif\n#  if defined __SSE4_2__ || (defined _MSC_VER && _MSC_VER >= 1500)\n#    include <nmmintrin.h>\n#    define CV_SSE4_2 1\n#  endif\n#  if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500)\n#    ifdef _MSC_VER\n#      include <nmmintrin.h>\n#    else\n#      include <popcntintrin.h>\n#    endif\n#    define CV_POPCNT 1\n#  endif\n#  if defined __AVX__ || (defined _MSC_VER && _MSC_VER >= 1600 && 0)\n// MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX\n// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32\n#    include <immintrin.h>\n#    define CV_AVX 1\n#    if defined(_XCR_XFEATURE_ENABLED_MASK)\n#      define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK)\n#    else\n#      define __xgetbv() 0\n#    endif\n#  endif\n#  if defined __AVX2__ || (defined _MSC_VER && _MSC_VER >= 1800 && 0)\n#    include <immintrin.h>\n#    define CV_AVX2 1\n#    if defined __FMA__\n#      define CV_FMA3 1\n#    endif\n#  endif\n#endif\n\n#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)\n# include <Intrin.h>\n# include \"arm_neon.h\"\n# define CV_NEON 1\n# define CPU_HAS_NEON_FEATURE (true)\n#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))\n#  include <arm_neon.h>\n#  define CV_NEON 1\n#endif\n\n#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__ || defined __ARM_NEON__) && !defined __SOFTFP__\n#  define CV_VFP 1\n#endif\n\n#endif // __CUDACC__\n\n#ifndef CV_POPCNT\n#define CV_POPCNT 0\n#endif\n#ifndef CV_MMX\n#  define CV_MMX 0\n#endif\n#ifndef CV_SSE\n#  define CV_SSE 0\n#endif\n#ifndef CV_SSE2\n#  define CV_SSE2 0\n#endif\n#ifndef CV_SSE3\n#  define CV_SSE3 0\n#endif\n#ifndef CV_SSSE3\n#  define CV_SSSE3 0\n#endif\n#ifndef CV_SSE4_1\n#  define CV_SSE4_1 0\n#endif\n#ifndef CV_SSE4_2\n#  define CV_SSE4_2 0\n#endif\n#ifndef CV_AVX\n#  define CV_AVX 0\n#endif\n#ifndef CV_AVX2\n#  define CV_AVX2 0\n#endif\n#ifndef CV_FMA3\n#  define CV_FMA3 0\n#endif\n#ifndef CV_AVX_512F\n#  define CV_AVX_512F 0\n#endif\n#ifndef CV_AVX_512BW\n#  define CV_AVX_512BW 0\n#endif\n#ifndef CV_AVX_512CD\n#  define CV_AVX_512CD 0\n#endif\n#ifndef CV_AVX_512DQ\n#  define CV_AVX_512DQ 0\n#endif\n#ifndef CV_AVX_512ER\n#  define CV_AVX_512ER 0\n#endif\n#ifndef CV_AVX_512IFMA512\n#  define CV_AVX_512IFMA512 0\n#endif\n#ifndef CV_AVX_512PF\n#  define CV_AVX_512PF 0\n#endif\n#ifndef CV_AVX_512VBMI\n#  define CV_AVX_512VBMI 0\n#endif\n#ifndef CV_AVX_512VL\n#  define CV_AVX_512VL 0\n#endif\n\n#ifndef CV_NEON\n#  define CV_NEON 0\n#endif\n\n#ifndef CV_VFP\n#  define CV_VFP 0\n#endif\n\n/* fundamental constants */\n#define CV_PI   3.1415926535897932384626433832795\n#define CV_2PI 6.283185307179586476925286766559\n#define CV_LOG2 0.69314718055994530941723212145818\n\ntypedef union Cv32suf\n{\n    int i;\n    unsigned u;\n    float f;\n}\nCv32suf;\n\ntypedef union Cv64suf\n{\n    int64 i;\n    uint64 u;\n    double f;\n}\nCv64suf;\n\n#define OPENCV_ABI_COMPATIBILITY 300\n\n#ifdef __OPENCV_BUILD\n#  define DISABLE_OPENCV_24_COMPATIBILITY\n#endif\n\n#if (defined WIN32 || defined _WIN32 || defined WINCE || defined __CYGWIN__) && defined CVAPI_EXPORTS\n#  define CV_EXPORTS __declspec(dllexport)\n#elif defined __GNUC__ && __GNUC__ >= 4\n#  define CV_EXPORTS __attribute__ ((visibility (\"default\")))\n#else\n#  define 
CV_EXPORTS\n#endif\n\n#ifndef CV_EXTERN_C\n#  ifdef __cplusplus\n#    define CV_EXTERN_C extern \"C\"\n#  else\n#    define CV_EXTERN_C\n#  endif\n#endif\n\n/* special informative macros for wrapper generators */\n#define CV_EXPORTS_W CV_EXPORTS\n#define CV_EXPORTS_W_SIMPLE CV_EXPORTS\n#define CV_EXPORTS_AS(synonym) CV_EXPORTS\n#define CV_EXPORTS_W_MAP CV_EXPORTS\n#define CV_IN_OUT\n#define CV_OUT\n#define CV_PROP\n#define CV_PROP_RW\n#define CV_WRAP\n#define CV_WRAP_AS(synonym)\n\n/****************************************************************************************\\\n*                                  Matrix type (Mat)                                     *\n\\****************************************************************************************/\n\n#define CV_CN_MAX     512\n#define CV_CN_SHIFT   3\n#define CV_DEPTH_MAX  (1 << CV_CN_SHIFT)\n\n#define CV_8U   0\n#define CV_8S   1\n#define CV_16U  2\n#define CV_16S  3\n#define CV_32S  4\n#define CV_32F  5\n#define CV_64F  6\n#define CV_USRTYPE1 7\n\n#define CV_MAT_DEPTH_MASK       (CV_DEPTH_MAX - 1)\n#define CV_MAT_DEPTH(flags)     ((flags) & CV_MAT_DEPTH_MASK)\n\n#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT))\n#define CV_MAKE_TYPE CV_MAKETYPE\n\n#define CV_8UC1 CV_MAKETYPE(CV_8U,1)\n#define CV_8UC2 CV_MAKETYPE(CV_8U,2)\n#define CV_8UC3 CV_MAKETYPE(CV_8U,3)\n#define CV_8UC4 CV_MAKETYPE(CV_8U,4)\n#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n))\n\n#define CV_8SC1 CV_MAKETYPE(CV_8S,1)\n#define CV_8SC2 CV_MAKETYPE(CV_8S,2)\n#define CV_8SC3 CV_MAKETYPE(CV_8S,3)\n#define CV_8SC4 CV_MAKETYPE(CV_8S,4)\n#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n))\n\n#define CV_16UC1 CV_MAKETYPE(CV_16U,1)\n#define CV_16UC2 CV_MAKETYPE(CV_16U,2)\n#define CV_16UC3 CV_MAKETYPE(CV_16U,3)\n#define CV_16UC4 CV_MAKETYPE(CV_16U,4)\n#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n))\n\n#define CV_16SC1 CV_MAKETYPE(CV_16S,1)\n#define CV_16SC2 CV_MAKETYPE(CV_16S,2)\n#define CV_16SC3 CV_MAKETYPE(CV_16S,3)\n#define CV_16SC4 CV_MAKETYPE(CV_16S,4)\n#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n))\n\n#define CV_32SC1 CV_MAKETYPE(CV_32S,1)\n#define CV_32SC2 CV_MAKETYPE(CV_32S,2)\n#define CV_32SC3 CV_MAKETYPE(CV_32S,3)\n#define CV_32SC4 CV_MAKETYPE(CV_32S,4)\n#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n))\n\n#define CV_32FC1 CV_MAKETYPE(CV_32F,1)\n#define CV_32FC2 CV_MAKETYPE(CV_32F,2)\n#define CV_32FC3 CV_MAKETYPE(CV_32F,3)\n#define CV_32FC4 CV_MAKETYPE(CV_32F,4)\n#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n))\n\n#define CV_64FC1 CV_MAKETYPE(CV_64F,1)\n#define CV_64FC2 CV_MAKETYPE(CV_64F,2)\n#define CV_64FC3 CV_MAKETYPE(CV_64F,3)\n#define CV_64FC4 CV_MAKETYPE(CV_64F,4)\n#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n))\n\n#define CV_MAT_CN_MASK          ((CV_CN_MAX - 1) << CV_CN_SHIFT)\n#define CV_MAT_CN(flags)        ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1)\n#define CV_MAT_TYPE_MASK        (CV_DEPTH_MAX*CV_CN_MAX - 1)\n#define CV_MAT_TYPE(flags)      ((flags) & CV_MAT_TYPE_MASK)\n#define CV_MAT_CONT_FLAG_SHIFT  14\n#define CV_MAT_CONT_FLAG        (1 << CV_MAT_CONT_FLAG_SHIFT)\n#define CV_IS_MAT_CONT(flags)   ((flags) & CV_MAT_CONT_FLAG)\n#define CV_IS_CONT_MAT          CV_IS_MAT_CONT\n#define CV_SUBMAT_FLAG_SHIFT    15\n#define CV_SUBMAT_FLAG          (1 << CV_SUBMAT_FLAG_SHIFT)\n#define CV_IS_SUBMAT(flags)     ((flags) & CV_MAT_SUBMAT_FLAG)\n\n/** Size of each channel item,\n   0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */\n#define CV_ELEM_SIZE1(type) \\\n    ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15)\n\n/** 
0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */\n#define CV_ELEM_SIZE(type) \\\n    (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3))\n\n#ifndef MIN\n#  define MIN(a,b)  ((a) > (b) ? (b) : (a))\n#endif\n\n#ifndef MAX\n#  define MAX(a,b)  ((a) < (b) ? (b) : (a))\n#endif\n\n/****************************************************************************************\\\n*          exchange-add operation for atomic operations on reference counters            *\n\\****************************************************************************************/\n\n#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)\n   // atomic increment on the linux version of the Intel(tm) compiler\n#  define CV_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)\n#elif defined __GNUC__\n#  if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__)\n#    ifdef __ATOMIC_ACQ_REL\n#      define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)\n#    else\n#      define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4)\n#    endif\n#  else\n#    if defined __ATOMIC_ACQ_REL && !defined __clang__\n       // version for gcc >= 4.7\n#      define CV_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL)\n#    else\n#      define CV_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta))\n#    endif\n#  endif\n#elif defined _MSC_VER && !defined RC_INVOKED\n#  include <intrin.h>\n#  define CV_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta)\n#else\n   CV_INLINE CV_XADD(int* addr, int delta) { int tmp = *addr; *addr += delta; return tmp; }\n#endif\n\n\n/****************************************************************************************\\\n*                                  CV_NORETURN attribute                                 *\n\\****************************************************************************************/\n\n#ifndef CV_NORETURN\n#  if defined(__GNUC__)\n#    define CV_NORETURN __attribute__((__noreturn__))\n#  elif defined(_MSC_VER) && (_MSC_VER >= 1300)\n#    define CV_NORETURN __declspec(noreturn)\n#  else\n#    define CV_NORETURN /* nothing by default */\n#  endif\n#endif\n\n\n/****************************************************************************************\\\n*                                    C++ Move semantics                                  *\n\\****************************************************************************************/\n\n#ifndef CV_CXX_MOVE_SEMANTICS\n#  if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) || defined(_MSC_VER) && _MSC_VER >= 1600\n#    define CV_CXX_MOVE_SEMANTICS 1\n#  elif defined(__clang)\n#    if __has_feature(cxx_rvalue_references)\n#      define CV_CXX_MOVE_SEMANTICS 1\n#    endif\n#  endif\n#else\n#  if CV_CXX_MOVE_SEMANTICS == 0\n#    undef CV_CXX_MOVE_SEMANTICS\n#  endif\n#endif\n\n//! @}\n\n#endif // __OPENCV_CORE_CVDEF_H__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cvstd.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_CVSTD_HPP__\n#define __OPENCV_CORE_CVSTD_HPP__\n\n#ifndef __cplusplus\n#  error cvstd.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core/cvdef.h\"\n\n#include <cstddef>\n#include <cstring>\n#include <cctype>\n\n#ifndef OPENCV_NOSTL\n#  include <string>\n#endif\n\n// import useful primitives from stl\n#ifndef OPENCV_NOSTL_TRANSITIONAL\n#  include <algorithm>\n#  include <utility>\n#  include <cstdlib> //for abs(int)\n#  include <cmath>\n\nnamespace cv\n{\n    using std::min;\n    using std::max;\n    using std::abs;\n    using std::swap;\n    using std::sqrt;\n    using std::exp;\n    using std::pow;\n    using std::log;\n}\n\nnamespace std\n{\n    static inline uchar abs(uchar a) { return a; }\n    static inline ushort abs(ushort a) { return a; }\n    static inline unsigned abs(unsigned a) { return a; }\n    static inline uint64 abs(uint64 a) { return a; }\n}\n\n#else\nnamespace cv\n{\n    template<typename T> static inline T min(T a, T b) { return a < b ? a : b; }\n    template<typename T> static inline T max(T a, T b) { return a > b ? a : b; }\n    template<typename T> static inline T abs(T a) { return a < 0 ? 
-a : a; }\n    template<typename T> static inline void swap(T& a, T& b) { T tmp = a; a = b; b = tmp; }\n\n    template<> inline uchar abs(uchar a) { return a; }\n    template<> inline ushort abs(ushort a) { return a; }\n    template<> inline unsigned abs(unsigned a) { return a; }\n    template<> inline uint64 abs(uint64 a) { return a; }\n}\n#endif\n\nnamespace cv {\n\n//! @addtogroup core_utils\n//! @{\n\n//////////////////////////// memory management functions ////////////////////////////\n\n/** @brief Allocates an aligned memory buffer.\n\nThe function allocates the buffer of the specified size and returns it. When the buffer size is 16\nbytes or more, the returned buffer is aligned to 16 bytes.\n@param bufSize Allocated buffer size.\n */\nCV_EXPORTS void* fastMalloc(size_t bufSize);\n\n/** @brief Deallocates a memory buffer.\n\nThe function deallocates the buffer allocated with fastMalloc . If NULL pointer is passed, the\nfunction does nothing. C version of the function clears the pointer *pptr* to avoid problems with\ndouble memory deallocation.\n@param ptr Pointer to the allocated buffer.\n */\nCV_EXPORTS void fastFree(void* ptr);\n\n/*!\n  The STL-compliant memory Allocator based on cv::fastMalloc() and cv::fastFree()\n*/\ntemplate<typename _Tp> class Allocator\n{\npublic:\n    typedef _Tp value_type;\n    typedef value_type* pointer;\n    typedef const value_type* const_pointer;\n    typedef value_type& reference;\n    typedef const value_type& const_reference;\n    typedef size_t size_type;\n    typedef ptrdiff_t difference_type;\n    template<typename U> class rebind { typedef Allocator<U> other; };\n\n    explicit Allocator() {}\n    ~Allocator() {}\n    explicit Allocator(Allocator const&) {}\n    template<typename U>\n    explicit Allocator(Allocator<U> const&) {}\n\n    // address\n    pointer address(reference r) { return &r; }\n    const_pointer address(const_reference r) { return &r; }\n\n    pointer allocate(size_type count, const void* =0) { return reinterpret_cast<pointer>(fastMalloc(count * sizeof (_Tp))); }\n    void deallocate(pointer p, size_type) { fastFree(p); }\n\n    void construct(pointer p, const _Tp& v) { new(static_cast<void*>(p)) _Tp(v); }\n    void destroy(pointer p) { p->~_Tp(); }\n\n    size_type max_size() const { return cv::max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); }\n};\n\n//! @} core_utils\n\n//! @cond IGNORED\n\nnamespace detail\n{\n\n// Metafunction to avoid taking a reference to void.\ntemplate<typename T>\nstruct RefOrVoid { typedef T& type; };\n\ntemplate<>\nstruct RefOrVoid<void>{ typedef void type; };\n\ntemplate<>\nstruct RefOrVoid<const void>{ typedef const void type; };\n\ntemplate<>\nstruct RefOrVoid<volatile void>{ typedef volatile void type; };\n\ntemplate<>\nstruct RefOrVoid<const volatile void>{ typedef const volatile void type; };\n\n// This class would be private to Ptr, if it didn't have to be a non-template.\nstruct PtrOwner;\n\n}\n\ntemplate<typename Y>\nstruct DefaultDeleter\n{\n    void operator () (Y* p) const;\n};\n\n//! @endcond\n\n//! @addtogroup core_basic\n//! @{\n\n/** @brief Template class for smart pointers with shared ownership\n\nA Ptr\\<T\\> pretends to be a pointer to an object of type T. 
Unlike an ordinary pointer, however, the\nobject will be automatically cleaned up once all Ptr instances pointing to it are destroyed.\n\nPtr is similar to boost::shared_ptr that is part of the Boost library\n(<http://www.boost.org/doc/libs/release/libs/smart_ptr/shared_ptr.htm>) and std::shared_ptr from\nthe [C++11](http://en.wikipedia.org/wiki/C++11) standard.\n\nThis class provides the following advantages:\n-   Default constructor, copy constructor, and assignment operator for an arbitrary C++ class or C\n    structure. For some objects, like files, windows, mutexes, sockets, and others, a copy\n    constructor or an assignment operator are difficult to define. For some other objects, like\n    complex classifiers in OpenCV, copy constructors are absent and not easy to implement. Finally,\n    some of complex OpenCV and your own data structures may be written in C. However, copy\n    constructors and default constructors can simplify programming a lot. Besides, they are often\n    required (for example, by STL containers). By using a Ptr to such an object instead of the\n    object itself, you automatically get all of the necessary constructors and the assignment\n    operator.\n-   *O(1)* complexity of the above-mentioned operations. While some structures, like std::vector,\n    provide a copy constructor and an assignment operator, the operations may take a considerable\n    amount of time if the data structures are large. But if the structures are put into a Ptr, the\n    overhead is small and independent of the data size.\n-   Automatic and customizable cleanup, even for C structures. See the example below with FILE\\*.\n-   Heterogeneous collections of objects. The standard STL and most other C++ and OpenCV containers\n    can store only objects of the same type and the same size. The classical solution to store\n    objects of different types in the same container is to store pointers to the base class (Base\\*)\n    instead but then you lose the automatic memory management. Again, by using Ptr\\<Base\\> instead\n    of raw pointers, you can solve the problem.\n\nA Ptr is said to *own* a pointer - that is, for each Ptr there is a pointer that will be deleted\nonce all Ptr instances that own it are destroyed. The owned pointer may be null, in which case\nnothing is deleted. Each Ptr also *stores* a pointer. The stored pointer is the pointer the Ptr\npretends to be; that is, the one you get when you use Ptr::get or the conversion to T\\*. It's\nusually the same as the owned pointer, but if you use casts or the general shared-ownership\nconstructor, the two may diverge: the Ptr will still own the original pointer, but will itself point\nto something else.\n\nThe owned pointer is treated as a black box. The only thing Ptr needs to know about it is how to\ndelete it. This knowledge is encapsulated in the *deleter* - an auxiliary object that is associated\nwith the owned pointer and shared between all Ptr instances that own it. The default deleter is an\ninstance of DefaultDeleter, which uses the standard C++ delete operator; as such it will work with\nany pointer allocated with the standard new operator.\n\nHowever, if the pointer must be deleted in a different way, you must specify a custom deleter upon\nPtr construction. A deleter is simply a callable object that accepts the pointer as its sole\nargument. 
For example, if you want to wrap FILE, you may do so as follows:\n@code\n    Ptr<FILE> f(fopen(\"myfile.txt\", \"w\"), fclose);\n    if(!f) throw ...;\n    fprintf(f, ....);\n    ...\n    // the file will be closed automatically by f's destructor.\n@endcode\nAlternatively, if you want all pointers of a particular type to be deleted the same way, you can\nspecialize DefaultDeleter<T>::operator() for that type, like this:\n@code\n    namespace cv {\n    template<> void DefaultDeleter<FILE>::operator ()(FILE * obj) const\n    {\n        fclose(obj);\n    }\n    }\n@endcode\nFor convenience, the following types from the OpenCV C API already have such a specialization that\ncalls the appropriate release function:\n-   CvCapture\n-   CvFileStorage\n-   CvHaarClassifierCascade\n-   CvMat\n-   CvMatND\n-   CvMemStorage\n-   CvSparseMat\n-   CvVideoWriter\n-   IplImage\n@note The shared ownership mechanism is implemented with reference counting. As such, cyclic\nownership (e.g. when object a contains a Ptr to object b, which contains a Ptr to object a) will\nlead to all involved objects never being cleaned up. Avoid such situations.\n@note It is safe to concurrently read (but not write) a Ptr instance from multiple threads and\ntherefore it is normally safe to use it in multi-threaded applications. The same is true for Mat and\nother C++ OpenCV classes that use internal reference counts.\n*/\ntemplate<typename T>\nstruct Ptr\n{\n    /** Generic programming support. */\n    typedef T element_type;\n\n    /** The default constructor creates a null Ptr - one that owns and stores a null pointer.\n    */\n    Ptr();\n\n    /**\n    If p is null, these are equivalent to the default constructor.\n    Otherwise, these constructors assume ownership of p - that is, the created Ptr owns and stores p\n    and assumes it is the sole owner of it. Don't use them if p is already owned by another Ptr, or\n    else p will get deleted twice.\n    With the first constructor, DefaultDeleter\\<Y\\>() becomes the associated deleter (so p will\n    eventually be deleted with the standard delete operator). Y must be a complete type at the point\n    of invocation.\n    With the second constructor, d becomes the associated deleter.\n    Y\\* must be convertible to T\\*.\n    @param p Pointer to own.\n    @note It is often easier to use makePtr instead.\n     */\n    template<typename Y>\n#ifdef DISABLE_OPENCV_24_COMPATIBILITY\n    explicit\n#endif\n    Ptr(Y* p);\n\n    /** @overload\n    @param d Deleter to use for the owned pointer.\n    @param p Pointer to own.\n    */\n    template<typename Y, typename D>\n    Ptr(Y* p, D d);\n\n    /**\n    These constructors create a Ptr that shares ownership with another Ptr - that is, own the same\n    pointer as o.\n    With the first two, the same pointer is stored, as well; for the second, Y\\* must be convertible\n    to T\\*.\n    With the third, p is stored, and Y may be any type. This constructor allows to have completely\n    unrelated owned and stored pointers, and should be used with care to avoid confusion. 
A relatively\n    benign use is to create a non-owning Ptr, like this:\n    @code\n        ptr = Ptr<T>(Ptr<T>(), dont_delete_me); // owns nothing; will not delete the pointer.\n    @endcode\n    @param o Ptr to share ownership with.\n    */\n    Ptr(const Ptr& o);\n\n    /** @overload\n    @param o Ptr to share ownership with.\n    */\n    template<typename Y>\n    Ptr(const Ptr<Y>& o);\n\n    /** @overload\n    @param o Ptr to share ownership with.\n    @param p Pointer to store.\n    */\n    template<typename Y>\n    Ptr(const Ptr<Y>& o, T* p);\n\n    /** The destructor is equivalent to calling Ptr::release. */\n    ~Ptr();\n\n    /**\n    Assignment replaces the current Ptr instance with one that owns and stores same pointers as o and\n    then destroys the old instance.\n    @param o Ptr to share ownership with.\n     */\n    Ptr& operator = (const Ptr& o);\n\n    /** @overload */\n    template<typename Y>\n    Ptr& operator = (const Ptr<Y>& o);\n\n    /** If no other Ptr instance owns the owned pointer, deletes it with the associated deleter. Then sets\n    both the owned and the stored pointers to NULL.\n    */\n    void release();\n\n    /**\n    `ptr.reset(...)` is equivalent to `ptr = Ptr<T>(...)`.\n    @param p Pointer to own.\n    */\n    template<typename Y>\n    void reset(Y* p);\n\n    /** @overload\n    @param d Deleter to use for the owned pointer.\n    @param p Pointer to own.\n    */\n    template<typename Y, typename D>\n    void reset(Y* p, D d);\n\n    /**\n    Swaps the owned and stored pointers (and deleters, if any) of this and o.\n    @param o Ptr to swap with.\n    */\n    void swap(Ptr& o);\n\n    /** Returns the stored pointer. */\n    T* get() const;\n\n    /** Ordinary pointer emulation. */\n    typename detail::RefOrVoid<T>::type operator * () const;\n\n    /** Ordinary pointer emulation. */\n    T* operator -> () const;\n\n    /** Equivalent to get(). */\n    operator T* () const;\n\n    /** ptr.empty() is equivalent to `!ptr.get()`. */\n    bool empty() const;\n\n    /** Returns a Ptr that owns the same pointer as this, and stores the same\n       pointer as this, except converted via static_cast to Y*.\n    */\n    template<typename Y>\n    Ptr<Y> staticCast() const;\n\n    /** Ditto for const_cast. */\n    template<typename Y>\n    Ptr<Y> constCast() const;\n\n    /** Ditto for dynamic_cast. */\n    template<typename Y>\n    Ptr<Y> dynamicCast() const;\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n    Ptr(Ptr&& o);\n    Ptr& operator = (Ptr&& o);\n#endif\n\nprivate:\n    detail::PtrOwner* owner;\n    T* stored;\n\n    template<typename Y>\n    friend struct Ptr; // have to do this for the cross-type copy constructor\n};\n\n/** Equivalent to ptr1.swap(ptr2). Provided to help write generic algorithms. */\ntemplate<typename T>\nvoid swap(Ptr<T>& ptr1, Ptr<T>& ptr2);\n\n/** Return whether ptr1.get() and ptr2.get() are equal and not equal, respectively. */\ntemplate<typename T>\nbool operator == (const Ptr<T>& ptr1, const Ptr<T>& ptr2);\ntemplate<typename T>\nbool operator != (const Ptr<T>& ptr1, const Ptr<T>& ptr2);\n\n/** `makePtr<T>(...)` is equivalent to `Ptr<T>(new T(...))`. 
It is shorter than the latter, and it's\nmarginally safer than using a constructor or Ptr::reset, since it ensures that the owned pointer\nis new and thus not owned by any other Ptr instance.\nUnfortunately, perfect forwarding is impossible to implement in C++03, and so makePtr is limited\nto constructors of T that have up to 10 arguments, none of which are non-const references.\n */\ntemplate<typename T>\nPtr<T> makePtr();\n/** @overload */\ntemplate<typename T, typename A1>\nPtr<T> makePtr(const A1& a1);\n/** @overload */\ntemplate<typename T, typename A1, typename A2>\nPtr<T> makePtr(const A1& a1, const A2& a2);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9);\n/** @overload */\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9, typename A10>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10);\n\n//////////////////////////////// string class ////////////////////////////////\n\nclass CV_EXPORTS FileNode; //for string constructor from FileNode\n\nclass CV_EXPORTS String\n{\npublic:\n    typedef char value_type;\n    typedef char& reference;\n    typedef const char& const_reference;\n    typedef char* pointer;\n    typedef const char* const_pointer;\n    typedef ptrdiff_t difference_type;\n    typedef size_t size_type;\n    typedef char* iterator;\n    typedef const char* const_iterator;\n\n    static const size_t npos = size_t(-1);\n\n    explicit String();\n    String(const String& str);\n    String(const String& str, size_t pos, size_t len = npos);\n    String(const char* s);\n    String(const char* s, size_t n);\n    String(size_t n, char c);\n    String(const char* first, const char* last);\n    template<typename Iterator> String(Iterator first, Iterator last);\n    explicit String(const FileNode& fn);\n    ~String();\n\n    String& operator=(const String& str);\n    String& operator=(const char* s);\n    String& operator=(char c);\n\n    
String& operator+=(const String& str);\n    String& operator+=(const char* s);\n    String& operator+=(char c);\n\n    size_t size() const;\n    size_t length() const;\n\n    char operator[](size_t idx) const;\n    char operator[](int idx) const;\n\n    const char* begin() const;\n    const char* end() const;\n\n    const char* c_str() const;\n\n    bool empty() const;\n    void clear();\n\n    int compare(const char* s) const;\n    int compare(const String& str) const;\n\n    void swap(String& str);\n    String substr(size_t pos = 0, size_t len = npos) const;\n\n    size_t find(const char* s, size_t pos, size_t n) const;\n    size_t find(char c, size_t pos = 0) const;\n    size_t find(const String& str, size_t pos = 0) const;\n    size_t find(const char* s, size_t pos = 0) const;\n\n    size_t rfind(const char* s, size_t pos, size_t n) const;\n    size_t rfind(char c, size_t pos = npos) const;\n    size_t rfind(const String& str, size_t pos = npos) const;\n    size_t rfind(const char* s, size_t pos = npos) const;\n\n    size_t find_first_of(const char* s, size_t pos, size_t n) const;\n    size_t find_first_of(char c, size_t pos = 0) const;\n    size_t find_first_of(const String& str, size_t pos = 0) const;\n    size_t find_first_of(const char* s, size_t pos = 0) const;\n\n    size_t find_last_of(const char* s, size_t pos, size_t n) const;\n    size_t find_last_of(char c, size_t pos = npos) const;\n    size_t find_last_of(const String& str, size_t pos = npos) const;\n    size_t find_last_of(const char* s, size_t pos = npos) const;\n\n    friend String operator+ (const String& lhs, const String& rhs);\n    friend String operator+ (const String& lhs, const char*   rhs);\n    friend String operator+ (const char*   lhs, const String& rhs);\n    friend String operator+ (const String& lhs, char          rhs);\n    friend String operator+ (char          lhs, const String& rhs);\n\n    String toLowerCase() const;\n\n#ifndef OPENCV_NOSTL\n    String(const std::string& str);\n    String(const std::string& str, size_t pos, size_t len = npos);\n    String& operator=(const std::string& str);\n    String& operator+=(const std::string& str);\n    operator std::string() const;\n\n    friend String operator+ (const String& lhs, const std::string& rhs);\n    friend String operator+ (const std::string& lhs, const String& rhs);\n#endif\n\nprivate:\n    char*  cstr_;\n    size_t len_;\n\n    char* allocate(size_t len); // len without trailing 0\n    void deallocate();\n\n    String(int); // disabled and invalid. Catch invalid usages like, commandLineParser.has(0) problem\n};\n\n//! @} core_basic\n\n////////////////////////// cv::String implementation /////////////////////////\n\n//! 
@cond IGNORED\n\ninline\nString::String()\n    : cstr_(0), len_(0)\n{}\n\ninline\nString::String(const String& str)\n    : cstr_(str.cstr_), len_(str.len_)\n{\n    if (cstr_)\n        CV_XADD(((int*)cstr_)-1, 1);\n}\n\ninline\nString::String(const String& str, size_t pos, size_t len)\n    : cstr_(0), len_(0)\n{\n    pos = min(pos, str.len_);\n    len = min(str.len_ - pos, len);\n    if (!len) return;\n    if (len == str.len_)\n    {\n        CV_XADD(((int*)str.cstr_)-1, 1);\n        cstr_ = str.cstr_;\n        len_ = str.len_;\n        return;\n    }\n    memcpy(allocate(len), str.cstr_ + pos, len);\n}\n\ninline\nString::String(const char* s)\n    : cstr_(0), len_(0)\n{\n    if (!s) return;\n    size_t len = strlen(s);\n    memcpy(allocate(len), s, len);\n}\n\ninline\nString::String(const char* s, size_t n)\n    : cstr_(0), len_(0)\n{\n    if (!n) return;\n    memcpy(allocate(n), s, n);\n}\n\ninline\nString::String(size_t n, char c)\n    : cstr_(0), len_(0)\n{\n    memset(allocate(n), c, n);\n}\n\ninline\nString::String(const char* first, const char* last)\n    : cstr_(0), len_(0)\n{\n    size_t len = (size_t)(last - first);\n    memcpy(allocate(len), first, len);\n}\n\ntemplate<typename Iterator> inline\nString::String(Iterator first, Iterator last)\n    : cstr_(0), len_(0)\n{\n    size_t len = (size_t)(last - first);\n    char* str = allocate(len);\n    while (first != last)\n    {\n        *str++ = *first;\n        ++first;\n    }\n}\n\ninline\nString::~String()\n{\n    deallocate();\n}\n\ninline\nString& String::operator=(const String& str)\n{\n    if (&str == this) return *this;\n\n    deallocate();\n    if (str.cstr_) CV_XADD(((int*)str.cstr_)-1, 1);\n    cstr_ = str.cstr_;\n    len_ = str.len_;\n    return *this;\n}\n\ninline\nString& String::operator=(const char* s)\n{\n    deallocate();\n    if (!s) return *this;\n    size_t len = strlen(s);\n    memcpy(allocate(len), s, len);\n    return *this;\n}\n\ninline\nString& String::operator=(char c)\n{\n    deallocate();\n    allocate(1)[0] = c;\n    return *this;\n}\n\ninline\nString& String::operator+=(const String& str)\n{\n    *this = *this + str;\n    return *this;\n}\n\ninline\nString& String::operator+=(const char* s)\n{\n    *this = *this + s;\n    return *this;\n}\n\ninline\nString& String::operator+=(char c)\n{\n    *this = *this + c;\n    return *this;\n}\n\ninline\nsize_t String::size() const\n{\n    return len_;\n}\n\ninline\nsize_t String::length() const\n{\n    return len_;\n}\n\ninline\nchar String::operator[](size_t idx) const\n{\n    return cstr_[idx];\n}\n\ninline\nchar String::operator[](int idx) const\n{\n    return cstr_[idx];\n}\n\ninline\nconst char* String::begin() const\n{\n    return cstr_;\n}\n\ninline\nconst char* String::end() const\n{\n    return len_ ? cstr_ + len_ : 0;\n}\n\ninline\nbool String::empty() const\n{\n    return len_ == 0;\n}\n\ninline\nconst char* String::c_str() const\n{\n    return cstr_ ? 
cstr_ : \"\";\n}\n\ninline\nvoid String::swap(String& str)\n{\n    cv::swap(cstr_, str.cstr_);\n    cv::swap(len_, str.len_);\n}\n\ninline\nvoid String::clear()\n{\n    deallocate();\n}\n\ninline\nint String::compare(const char* s) const\n{\n    if (cstr_ == s) return 0;\n    return strcmp(c_str(), s);\n}\n\ninline\nint String::compare(const String& str) const\n{\n    if (cstr_ == str.cstr_) return 0;\n    return strcmp(c_str(), str.c_str());\n}\n\ninline\nString String::substr(size_t pos, size_t len) const\n{\n    return String(*this, pos, len);\n}\n\ninline\nsize_t String::find(const char* s, size_t pos, size_t n) const\n{\n    if (n == 0 || pos + n > len_) return npos;\n    const char* lmax = cstr_ + len_ - n;\n    for (const char* i = cstr_ + pos; i <= lmax; ++i)\n    {\n        size_t j = 0;\n        while (j < n && s[j] == i[j]) ++j;\n        if (j == n) return (size_t)(i - cstr_);\n    }\n    return npos;\n}\n\ninline\nsize_t String::find(char c, size_t pos) const\n{\n    return find(&c, pos, 1);\n}\n\ninline\nsize_t String::find(const String& str, size_t pos) const\n{\n    return find(str.c_str(), pos, str.len_);\n}\n\ninline\nsize_t String::find(const char* s, size_t pos) const\n{\n    if (pos >= len_ || !s[0]) return npos;\n    const char* lmax = cstr_ + len_;\n    for (const char* i = cstr_ + pos; i < lmax; ++i)\n    {\n        size_t j = 0;\n        while (s[j] && s[j] == i[j])\n        {   if(i + j >= lmax) return npos;\n            ++j;\n        }\n        if (!s[j]) return (size_t)(i - cstr_);\n    }\n    return npos;\n}\n\ninline\nsize_t String::rfind(const char* s, size_t pos, size_t n) const\n{\n    if (n > len_) return npos;\n    if (pos > len_ - n) pos = len_ - n;\n    for (const char* i = cstr_ + pos; i >= cstr_; --i)\n    {\n        size_t j = 0;\n        while (j < n && s[j] == i[j]) ++j;\n        if (j == n) return (size_t)(i - cstr_);\n    }\n    return npos;\n}\n\ninline\nsize_t String::rfind(char c, size_t pos) const\n{\n    return rfind(&c, pos, 1);\n}\n\ninline\nsize_t String::rfind(const String& str, size_t pos) const\n{\n    return rfind(str.c_str(), pos, str.len_);\n}\n\ninline\nsize_t String::rfind(const char* s, size_t pos) const\n{\n    return rfind(s, pos, strlen(s));\n}\n\ninline\nsize_t String::find_first_of(const char* s, size_t pos, size_t n) const\n{\n    if (n == 0 || pos + n > len_) return npos;\n    const char* lmax = cstr_ + len_;\n    for (const char* i = cstr_ + pos; i < lmax; ++i)\n    {\n        for (size_t j = 0; j < n; ++j)\n            if (s[j] == *i)\n                return (size_t)(i - cstr_);\n    }\n    return npos;\n}\n\ninline\nsize_t String::find_first_of(char c, size_t pos) const\n{\n    return find_first_of(&c, pos, 1);\n}\n\ninline\nsize_t String::find_first_of(const String& str, size_t pos) const\n{\n    return find_first_of(str.c_str(), pos, str.len_);\n}\n\ninline\nsize_t String::find_first_of(const char* s, size_t pos) const\n{\n    if (len_ == 0) return npos;\n    if (pos >= len_ || !s[0]) return npos;\n    const char* lmax = cstr_ + len_;\n    for (const char* i = cstr_ + pos; i < lmax; ++i)\n    {\n        for (size_t j = 0; s[j]; ++j)\n            if (s[j] == *i)\n                return (size_t)(i - cstr_);\n    }\n    return npos;\n}\n\ninline\nsize_t String::find_last_of(const char* s, size_t pos, size_t n) const\n{\n    if (len_ == 0) return npos;\n    if (pos >= len_) pos = len_ - 1;\n    for (const char* i = cstr_ + pos; i >= cstr_; --i)\n    {\n        for (size_t j = 0; j < n; ++j)\n            if (s[j] == *i)\n   
             return (size_t)(i - cstr_);\n    }\n    return npos;\n}\n\ninline\nsize_t String::find_last_of(char c, size_t pos) const\n{\n    return find_last_of(&c, pos, 1);\n}\n\ninline\nsize_t String::find_last_of(const String& str, size_t pos) const\n{\n    return find_last_of(str.c_str(), pos, str.len_);\n}\n\ninline\nsize_t String::find_last_of(const char* s, size_t pos) const\n{\n    if (len_ == 0) return npos;\n    if (pos >= len_) pos = len_ - 1;\n    for (const char* i = cstr_ + pos; i >= cstr_; --i)\n    {\n        for (size_t j = 0; s[j]; ++j)\n            if (s[j] == *i)\n                return (size_t)(i - cstr_);\n    }\n    return npos;\n}\n\ninline\nString String::toLowerCase() const\n{\n    String res(cstr_, len_);\n\n    for (size_t i = 0; i < len_; ++i)\n        res.cstr_[i] = (char) ::tolower(cstr_[i]);\n\n    return res;\n}\n\n//! @endcond\n\n// ************************* cv::String non-member functions *************************\n\n//! @relates cv::String\n//! @{\n\ninline\nString operator + (const String& lhs, const String& rhs)\n{\n    String s;\n    s.allocate(lhs.len_ + rhs.len_);\n    memcpy(s.cstr_, lhs.cstr_, lhs.len_);\n    memcpy(s.cstr_ + lhs.len_, rhs.cstr_, rhs.len_);\n    return s;\n}\n\ninline\nString operator + (const String& lhs, const char* rhs)\n{\n    String s;\n    size_t rhslen = strlen(rhs);\n    s.allocate(lhs.len_ + rhslen);\n    memcpy(s.cstr_, lhs.cstr_, lhs.len_);\n    memcpy(s.cstr_ + lhs.len_, rhs, rhslen);\n    return s;\n}\n\ninline\nString operator + (const char* lhs, const String& rhs)\n{\n    String s;\n    size_t lhslen = strlen(lhs);\n    s.allocate(lhslen + rhs.len_);\n    memcpy(s.cstr_, lhs, lhslen);\n    memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_);\n    return s;\n}\n\ninline\nString operator + (const String& lhs, char rhs)\n{\n    String s;\n    s.allocate(lhs.len_ + 1);\n    memcpy(s.cstr_, lhs.cstr_, lhs.len_);\n    s.cstr_[lhs.len_] = rhs;\n    return s;\n}\n\ninline\nString operator + (char lhs, const String& rhs)\n{\n    String s;\n    s.allocate(rhs.len_ + 1);\n    s.cstr_[0] = lhs;\n    memcpy(s.cstr_ + 1, rhs.cstr_, rhs.len_);\n    return s;\n}\n\nstatic inline bool operator== (const String& lhs, const String& rhs) { return 0 == lhs.compare(rhs); }\nstatic inline bool operator== (const char*   lhs, const String& rhs) { return 0 == rhs.compare(lhs); }\nstatic inline bool operator== (const String& lhs, const char*   rhs) { return 0 == lhs.compare(rhs); }\nstatic inline bool operator!= (const String& lhs, const String& rhs) { return 0 != lhs.compare(rhs); }\nstatic inline bool operator!= (const char*   lhs, const String& rhs) { return 0 != rhs.compare(lhs); }\nstatic inline bool operator!= (const String& lhs, const char*   rhs) { return 0 != lhs.compare(rhs); }\nstatic inline bool operator<  (const String& lhs, const String& rhs) { return lhs.compare(rhs) <  0; }\nstatic inline bool operator<  (const char*   lhs, const String& rhs) { return rhs.compare(lhs) >  0; }\nstatic inline bool operator<  (const String& lhs, const char*   rhs) { return lhs.compare(rhs) <  0; }\nstatic inline bool operator<= (const String& lhs, const String& rhs) { return lhs.compare(rhs) <= 0; }\nstatic inline bool operator<= (const char*   lhs, const String& rhs) { return rhs.compare(lhs) >= 0; }\nstatic inline bool operator<= (const String& lhs, const char*   rhs) { return lhs.compare(rhs) <= 0; }\nstatic inline bool operator>  (const String& lhs, const String& rhs) { return lhs.compare(rhs) >  0; }\nstatic inline bool operator>  (const char*   
lhs, const String& rhs) { return rhs.compare(lhs) <  0; }\nstatic inline bool operator>  (const String& lhs, const char*   rhs) { return lhs.compare(rhs) >  0; }\nstatic inline bool operator>= (const String& lhs, const String& rhs) { return lhs.compare(rhs) >= 0; }\nstatic inline bool operator>= (const char*   lhs, const String& rhs) { return rhs.compare(lhs) <= 0; }\nstatic inline bool operator>= (const String& lhs, const char*   rhs) { return lhs.compare(rhs) >= 0; }\n\n//! @} relates cv::String\n\n} // cv\n\n#ifndef OPENCV_NOSTL_TRANSITIONAL\nnamespace std\n{\n    static inline void swap(cv::String& a, cv::String& b) { a.swap(b); }\n}\n#else\nnamespace cv\n{\n    template<> inline\n    void swap<cv::String>(cv::String& a, cv::String& b)\n    {\n        a.swap(b);\n    }\n}\n#endif\n\n#include \"opencv2/core/ptr.inl.hpp\"\n\n#endif //__OPENCV_CORE_CVSTD_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/cvstd.inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_CVSTDINL_HPP__\n#define __OPENCV_CORE_CVSTDINL_HPP__\n\n#ifndef OPENCV_NOSTL\n#  include <complex>\n#  include <ostream>\n#endif\n\n//! 
@cond IGNORED\n\nnamespace cv\n{\n#ifndef OPENCV_NOSTL\n\ntemplate<typename _Tp> class DataType< std::complex<_Tp> >\n{\npublic:\n    typedef std::complex<_Tp>  value_type;\n    typedef value_type         work_type;\n    typedef _Tp                channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 2,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels) };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\ninline\nString::String(const std::string& str)\n    : cstr_(0), len_(0)\n{\n    if (!str.empty())\n    {\n        size_t len = str.size();\n        memcpy(allocate(len), str.c_str(), len);\n    }\n}\n\ninline\nString::String(const std::string& str, size_t pos, size_t len)\n    : cstr_(0), len_(0)\n{\n    size_t strlen = str.size();\n    pos = min(pos, strlen);\n    len = min(strlen - pos, len);\n    if (!len) return;\n    memcpy(allocate(len), str.c_str() + pos, len);\n}\n\ninline\nString& String::operator = (const std::string& str)\n{\n    deallocate();\n    if (!str.empty())\n    {\n        size_t len = str.size();\n        memcpy(allocate(len), str.c_str(), len);\n    }\n    return *this;\n}\n\ninline\nString& String::operator += (const std::string& str)\n{\n    *this = *this + str;\n    return *this;\n}\n\ninline\nString::operator std::string() const\n{\n    return std::string(cstr_, len_);\n}\n\ninline\nString operator + (const String& lhs, const std::string& rhs)\n{\n    String s;\n    size_t rhslen = rhs.size();\n    s.allocate(lhs.len_ + rhslen);\n    memcpy(s.cstr_, lhs.cstr_, lhs.len_);\n    memcpy(s.cstr_ + lhs.len_, rhs.c_str(), rhslen);\n    return s;\n}\n\ninline\nString operator + (const std::string& lhs, const String& rhs)\n{\n    String s;\n    size_t lhslen = lhs.size();\n    s.allocate(lhslen + rhs.len_);\n    memcpy(s.cstr_, lhs.c_str(), lhslen);\n    memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_);\n    return s;\n}\n\ninline\nFileNode::operator std::string() const\n{\n    String value;\n    read(*this, value, value);\n    return value;\n}\n\ntemplate<> inline\nvoid operator >> (const FileNode& n, std::string& value)\n{\n    String val;\n    read(n, val, val);\n    value = val;\n}\n\ntemplate<> inline\nFileStorage& operator << (FileStorage& fs, const std::string& value)\n{\n    return fs << cv::String(value);\n}\n\nstatic inline\nstd::ostream& operator << (std::ostream& os, const String& str)\n{\n    return os << str.c_str();\n}\n\nstatic inline\nstd::ostream& operator << (std::ostream& out, Ptr<Formatted> fmtd)\n{\n    fmtd->reset();\n    for(const char* str = fmtd->next(); str; str = fmtd->next())\n        out << str;\n    return out;\n}\n\nstatic inline\nstd::ostream& operator << (std::ostream& out, const Mat& mtx)\n{\n    return out << Formatter::get()->format(mtx);\n}\n\ntemplate<typename _Tp> static inline\nstd::ostream& operator << (std::ostream& out, const std::vector<Point_<_Tp> >& vec)\n{\n    return out << Formatter::get()->format(Mat(vec));\n}\n\n\ntemplate<typename _Tp> static inline\nstd::ostream& operator << (std::ostream& out, const std::vector<Point3_<_Tp> >& vec)\n{\n    return out << Formatter::get()->format(Mat(vec));\n}\n\n\ntemplate<typename _Tp, int m, int n> static inline\nstd::ostream& operator << (std::ostream& out, const Matx<_Tp, m, n>& matx)\n{\n    return out << Formatter::get()->format(Mat(matx));\n}\n\ntemplate<typename _Tp> static inline\nstd::ostream& operator << 
(std::ostream& out, const Point_<_Tp>& p)\n{\n    out << \"[\" << p.x << \", \" << p.y << \"]\";\n    return out;\n}\n\ntemplate<typename _Tp> static inline\nstd::ostream& operator << (std::ostream& out, const Point3_<_Tp>& p)\n{\n    out << \"[\" << p.x << \", \" << p.y << \", \" << p.z << \"]\";\n    return out;\n}\n\ntemplate<typename _Tp, int n> static inline\nstd::ostream& operator << (std::ostream& out, const Vec<_Tp, n>& vec)\n{\n    out << \"[\";\n#ifdef _MSC_VER\n#pragma warning( push )\n#pragma warning( disable: 4127 )\n#endif\n    if(Vec<_Tp, n>::depth < CV_32F)\n#ifdef _MSC_VER\n#pragma warning( pop )\n#endif\n    {\n        for (int i = 0; i < n - 1; ++i) {\n            out << (int)vec[i] << \", \";\n        }\n        out << (int)vec[n-1] << \"]\";\n    }\n    else\n    {\n        for (int i = 0; i < n - 1; ++i) {\n            out << vec[i] << \", \";\n        }\n        out << vec[n-1] << \"]\";\n    }\n\n    return out;\n}\n\ntemplate<typename _Tp> static inline\nstd::ostream& operator << (std::ostream& out, const Size_<_Tp>& size)\n{\n    return out << \"[\" << size.width << \" x \" << size.height << \"]\";\n}\n\ntemplate<typename _Tp> static inline\nstd::ostream& operator << (std::ostream& out, const Rect_<_Tp>& rect)\n{\n    return out << \"[\" << rect.width << \" x \" << rect.height << \" from (\" << rect.x << \", \" << rect.y << \")]\";\n}\n\n\n#endif // OPENCV_NOSTL\n} // cv\n\n//! @endcond\n\n#endif // __OPENCV_CORE_CVSTDINL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/directx.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors as is and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the copyright holders or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_DIRECTX_HPP__\n#define __OPENCV_CORE_DIRECTX_HPP__\n\n#include \"mat.hpp\"\n#include \"ocl.hpp\"\n\n#if !defined(__d3d11_h__)\nstruct ID3D11Device;\nstruct ID3D11Texture2D;\n#endif\n\n#if !defined(__d3d10_h__)\nstruct ID3D10Device;\nstruct ID3D10Texture2D;\n#endif\n\n#if !defined(_D3D9_H_)\nstruct IDirect3DDevice9;\nstruct IDirect3DDevice9Ex;\nstruct IDirect3DSurface9;\n#endif\n\n\nnamespace cv { namespace directx {\n\nnamespace ocl {\nusing namespace cv::ocl;\n\n//! @addtogroup core_directx\n// This section describes OpenCL and DirectX interoperability.\n//\n// To enable DirectX support, configure OpenCV using CMake with WITH_DIRECTX=ON . Note, DirectX is\n// supported only on Windows.\n//\n// To use OpenCL functionality you should first initialize OpenCL context from DirectX resource.\n//\n//! @{\n\n// TODO static functions in the Context class\n//! @brief Creates OpenCL context from D3D11 device\n//\n//! @param pD3D11Device - pointer to D3D11 device\n//! @return Returns reference to OpenCL Context\nCV_EXPORTS Context& initializeContextFromD3D11Device(ID3D11Device* pD3D11Device);\n\n//! @brief Creates OpenCL context from D3D10 device\n//\n//! @param pD3D10Device - pointer to D3D10 device\n//! @return Returns reference to OpenCL Context\nCV_EXPORTS Context& initializeContextFromD3D10Device(ID3D10Device* pD3D10Device);\n\n//! 
@brief Creates OpenCL context from Direct3DDevice9Ex device\n//\n//! @param pDirect3DDevice9Ex - pointer to Direct3DDevice9Ex device\n//! @return Returns reference to OpenCL Context\nCV_EXPORTS Context& initializeContextFromDirect3DDevice9Ex(IDirect3DDevice9Ex* pDirect3DDevice9Ex);\n\n//! @brief Creates OpenCL context from Direct3DDevice9 device\n//\n//! @param pDirect3DDevice9 - pointer to Direct3DDevice9 device\n//! @return Returns reference to OpenCL Context\nCV_EXPORTS Context& initializeContextFromDirect3DDevice9(IDirect3DDevice9* pDirect3DDevice9);\n\n//! @}\n\n} // namespace cv::directx::ocl\n\n//! @addtogroup core_directx\n//! @{\n\n//! @brief Converts InputArray to ID3D11Texture2D. If destination texture format is DXGI_FORMAT_NV12 then\n//!        input UMat is expected to be in BGR format and data will be downsampled and color-converted to NV12.\n//\n//! @note Note: Destination texture must be allocated by application. Function does memory copy from src to\n//!             pD3D11Texture2D\n//\n//! @param src - source InputArray\n//! @param pD3D11Texture2D - destination D3D11 texture\nCV_EXPORTS void convertToD3D11Texture2D(InputArray src, ID3D11Texture2D* pD3D11Texture2D);\n\n//! @brief Converts ID3D11Texture2D to OutputArray. If input texture format is DXGI_FORMAT_NV12 then\n//!        data will be upsampled and color-converted to BGR format.\n//\n//! @note Note: Destination matrix will be re-allocated if it has not enough memory to match texture size.\n//!             function does memory copy from pD3D11Texture2D to dst\n//\n//! @param pD3D11Texture2D - source D3D11 texture\n//! @param dst             - destination OutputArray\nCV_EXPORTS void convertFromD3D11Texture2D(ID3D11Texture2D* pD3D11Texture2D, OutputArray dst);\n\n//! @brief Converts InputArray to ID3D10Texture2D\n//\n//! @note Note: function does memory copy from src to\n//!             pD3D10Texture2D\n//\n//! @param src             - source InputArray\n//! @param pD3D10Texture2D - destination D3D10 texture\nCV_EXPORTS void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D);\n\n//! @brief Converts ID3D10Texture2D to OutputArray\n//\n//! @note Note: function does memory copy from pD3D10Texture2D\n//!             to dst\n//\n//! @param pD3D10Texture2D - source D3D10 texture\n//! @param dst             - destination OutputArray\nCV_EXPORTS void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst);\n\n//! @brief Converts InputArray to IDirect3DSurface9\n//\n//! @note Note: function does memory copy from src to\n//!             pDirect3DSurface9\n//\n//! @param src                 - source InputArray\n//! @param pDirect3DSurface9   - destination D3D9 surface\n//! @param surfaceSharedHandle - shared handle\nCV_EXPORTS void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurface9, void* surfaceSharedHandle = NULL);\n\n//! @brief Converts IDirect3DSurface9 to OutputArray\n//\n//! @note Note: function does memory copy from pDirect3DSurface9\n//!             to dst\n//\n//! @param pDirect3DSurface9   - source D3D9 surface\n//! @param dst                 - destination OutputArray\n//! @param surfaceSharedHandle - shared handle\nCV_EXPORTS void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArray dst, void* surfaceSharedHandle = NULL);\n\n//! @brief Get OpenCV type from DirectX type\n//! @param iDXGI_FORMAT - enum DXGI_FORMAT for D3D10/D3D11\n//! 
@return OpenCV type or -1 if there is no equivalent\nCV_EXPORTS int getTypeFromDXGI_FORMAT(const int iDXGI_FORMAT); // enum DXGI_FORMAT for D3D10/D3D11\n\n//! @brief Get OpenCV type from DirectX type\n//! @param iD3DFORMAT - enum D3DTYPE for D3D9\n//! @return OpenCV type or -1 if there is no equivalent\nCV_EXPORTS int getTypeFromD3DFORMAT(const int iD3DFORMAT); // enum D3DTYPE for D3D9\n\n//! @}\n\n} } // namespace cv::directx\n\n#endif // __OPENCV_CORE_DIRECTX_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/eigen.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n\n#ifndef __OPENCV_CORE_EIGEN_HPP__\n#define __OPENCV_CORE_EIGEN_HPP__\n\n#include \"opencv2/core.hpp\"\n\n#if defined _MSC_VER && _MSC_VER >= 1200\n#pragma warning( disable: 4714 ) //__forceinline is not inlined\n#pragma warning( disable: 4127 ) //conditional expression is constant\n#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data\n#endif\n\nnamespace cv\n{\n\n//! @addtogroup core_eigen\n//! 
@{\n\ntemplate<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline\nvoid eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst )\n{\n    if( !(src.Flags & Eigen::RowMajorBit) )\n    {\n        Mat _src(src.cols(), src.rows(), DataType<_Tp>::type,\n              (void*)src.data(), src.stride()*sizeof(_Tp));\n        transpose(_src, dst);\n    }\n    else\n    {\n        Mat _src(src.rows(), src.cols(), DataType<_Tp>::type,\n                 (void*)src.data(), src.stride()*sizeof(_Tp));\n        _src.copyTo(dst);\n    }\n}\n\n// Matx case\ntemplate<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline\nvoid eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src,\n               Matx<_Tp, _rows, _cols>& dst )\n{\n    if( !(src.Flags & Eigen::RowMajorBit) )\n    {\n        dst = Matx<_Tp, _cols, _rows>(static_cast<const _Tp*>(src.data())).t();\n    }\n    else\n    {\n        dst = Matx<_Tp, _rows, _cols>(static_cast<const _Tp*>(src.data()));\n    }\n}\n\ntemplate<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline\nvoid cv2eigen( const Mat& src,\n               Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )\n{\n    CV_DbgAssert(src.rows == _rows && src.cols == _cols);\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        if( src.type() == _dst.type() )\n            transpose(src, _dst);\n        else if( src.cols == src.rows )\n        {\n            src.convertTo(_dst, _dst.type());\n            transpose(_dst, _dst);\n        }\n        else\n            Mat(src.t()).convertTo(_dst, _dst.type());\n    }\n    else\n    {\n        const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        src.convertTo(_dst, _dst.type());\n    }\n}\n\n// Matx case\ntemplate<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols> static inline\nvoid cv2eigen( const Matx<_Tp, _rows, _cols>& src,\n               Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )\n{\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(_cols, _rows, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        transpose(src, _dst);\n    }\n    else\n    {\n        const Mat _dst(_rows, _cols, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        Mat(src).copyTo(_dst);\n    }\n}\n\ntemplate<typename _Tp>  static inline\nvoid cv2eigen( const Mat& src,\n               Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )\n{\n    dst.resize(src.rows, src.cols);\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,\n             dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        if( src.type() == _dst.type() )\n            transpose(src, _dst);\n        else if( src.cols == src.rows )\n        {\n            src.convertTo(_dst, _dst.type());\n            transpose(_dst, _dst);\n        }\n        else\n            Mat(src.t()).convertTo(_dst, _dst.type());\n    }\n    else\n    {\n        const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,\n                 dst.data(), 
(size_t)(dst.stride()*sizeof(_Tp)));\n        src.convertTo(_dst, _dst.type());\n    }\n}\n\n// Matx case\ntemplate<typename _Tp, int _rows, int _cols> static inline\nvoid cv2eigen( const Matx<_Tp, _rows, _cols>& src,\n               Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )\n{\n    dst.resize(_rows, _cols);\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(_cols, _rows, DataType<_Tp>::type,\n             dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        transpose(src, _dst);\n    }\n    else\n    {\n        const Mat _dst(_rows, _cols, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        Mat(src).copyTo(_dst);\n    }\n}\n\ntemplate<typename _Tp> static inline\nvoid cv2eigen( const Mat& src,\n               Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )\n{\n    CV_Assert(src.cols == 1);\n    dst.resize(src.rows);\n\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        if( src.type() == _dst.type() )\n            transpose(src, _dst);\n        else\n            Mat(src.t()).convertTo(_dst, _dst.type());\n    }\n    else\n    {\n        const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        src.convertTo(_dst, _dst.type());\n    }\n}\n\n// Matx case\ntemplate<typename _Tp, int _rows> static inline\nvoid cv2eigen( const Matx<_Tp, _rows, 1>& src,\n               Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )\n{\n    dst.resize(_rows);\n\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(1, _rows, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        transpose(src, _dst);\n    }\n    else\n    {\n        const Mat _dst(_rows, 1, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        src.copyTo(_dst);\n    }\n}\n\n\ntemplate<typename _Tp> static inline\nvoid cv2eigen( const Mat& src,\n               Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )\n{\n    CV_Assert(src.rows == 1);\n    dst.resize(src.cols);\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        if( src.type() == _dst.type() )\n            transpose(src, _dst);\n        else\n            Mat(src.t()).convertTo(_dst, _dst.type());\n    }\n    else\n    {\n        const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        src.convertTo(_dst, _dst.type());\n    }\n}\n\n//Matx\ntemplate<typename _Tp, int _cols> static inline\nvoid cv2eigen( const Matx<_Tp, 1, _cols>& src,\n               Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )\n{\n    dst.resize(_cols);\n    if( !(dst.Flags & Eigen::RowMajorBit) )\n    {\n        const Mat _dst(_cols, 1, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        transpose(src, _dst);\n    }\n    else\n    {\n        const Mat _dst(1, _cols, DataType<_Tp>::type,\n                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));\n        Mat(src).copyTo(_dst);\n    }\n}\n\n//! @}\n\n} // cv\n\n#endif\n"
  },
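  {
    "path": "docs/examples/eigen_interop_example.cpp",
    "content": "// Illustrative usage sketch for the eigen2cv()/cv2eigen() helpers declared in\n// opencv2/core/eigen.hpp above. This file is NOT part of the original sources;\n// the path and the demo itself are hypothetical, assuming Eigen and the OpenCV\n// core headers are available on the include path.\n#include <Eigen/Dense>\n#include <opencv2/core.hpp>\n#include <opencv2/core/eigen.hpp>\n#include <cstdio>\n\nint main()\n{\n    // Fill a small fixed-size Eigen matrix and convert it to cv::Mat.\n    Eigen::Matrix3d e;\n    e << 1, 2, 3,\n         4, 5, 6,\n         7, 8, 9;\n    cv::Mat m;\n    cv::eigen2cv(e, m);            // m becomes a 3x3 CV_64F matrix\n\n    // Convert back; the round trip preserves the values.\n    Eigen::Matrix3d e2;\n    cv::cv2eigen(m, e2);\n    std::printf(\"%f\\n\", e2(1, 2)); // prints 6.000000\n    return 0;\n}\n"
  },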
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/fast_math.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_FAST_MATH_HPP__\n#define __OPENCV_CORE_FAST_MATH_HPP__\n\n#include \"opencv2/core/cvdef.h\"\n\n//! @addtogroup core_utils\n//! @{\n\n/****************************************************************************************\\\n*                                      fast math                                         *\n\\****************************************************************************************/\n\n#if defined __BORLANDC__\n#  include <fastmath.h>\n#elif defined __cplusplus\n#  include <cmath>\n#else\n#  include <math.h>\n#endif\n\n#ifdef HAVE_TEGRA_OPTIMIZATION\n#  include \"tegra_round.hpp\"\n#endif\n\n#if CV_VFP\n    // 1. general scheme\n    #define ARM_ROUND(_value, _asm_string) \\\n        int res; \\\n        float temp; \\\n        asm(_asm_string : [res] \"=r\" (res), [temp] \"=w\" (temp) : [value] \"w\" (_value)); \\\n        return res\n    // 2. version for double\n    #ifdef __clang__\n        #define ARM_ROUND_DBL(value) ARM_ROUND(value, \"vcvtr.s32.f64 %[temp], %[value] \\n vmov %[res], %[temp]\")\n    #else\n        #define ARM_ROUND_DBL(value) ARM_ROUND(value, \"vcvtr.s32.f64 %[temp], %P[value] \\n vmov %[res], %[temp]\")\n    #endif\n    // 3. 
version for float\n    #define ARM_ROUND_FLT(value) ARM_ROUND(value, \"vcvtr.s32.f32 %[temp], %[value]\\n vmov %[res], %[temp]\")\n#endif // CV_VFP\n\n/** @brief Rounds floating-point number to the nearest integer\n\n @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the\n result is not defined.\n */\nCV_INLINE int\ncvRound( double value )\n{\n#if ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ \\\n    && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)\n    __m128d t = _mm_set_sd( value );\n    return _mm_cvtsd_si32(t);\n#elif defined _MSC_VER && defined _M_IX86\n    int t;\n    __asm\n    {\n        fld value;\n        fistp t;\n    }\n    return t;\n#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || \\\n        defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION\n    TEGRA_ROUND_DBL(value);\n#elif defined CV_ICC || defined __GNUC__\n# if CV_VFP\n    ARM_ROUND_DBL(value);\n# else\n    return (int)lrint(value);\n# endif\n#else\n    /* it's ok if round does not comply with IEEE754 standard;\n       the tests should allow +/-1 difference when the tested functions use round */\n    return (int)(value + (value >= 0 ? 0.5 : -0.5));\n#endif\n}\n\n\n/** @brief Rounds floating-point number to the nearest integer not larger than the original.\n\n The function computes an integer i such that:\n \f[i \le \texttt{value} < i+1\f]\n @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the\n result is not defined.\n */\nCV_INLINE int cvFloor( double value )\n{\n#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)\n    __m128d t = _mm_set_sd( value );\n    int i = _mm_cvtsd_si32(t);\n    return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i)));\n#elif defined __GNUC__\n    int i = (int)value;\n    return i - (i > value);\n#else\n    int i = cvRound(value);\n    float diff = (float)(value - i);\n    return i - (diff < 0);\n#endif\n}\n\n/** @brief Rounds floating-point number to the nearest integer not smaller than the original.\n\n The function computes an integer i such that:\n \f[i \ge \texttt{value} > i-1\f]\n @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the\n result is not defined.\n */\nCV_INLINE int cvCeil( double value )\n{\n#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__)) && !defined(__CUDACC__)\n    __m128d t = _mm_set_sd( value );\n    int i = _mm_cvtsd_si32(t);\n    return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t));\n#elif defined __GNUC__\n    int i = (int)value;\n    return i + (i < value);\n#else\n    int i = cvRound(value);\n    float diff = (float)(i - value);\n    return i + (diff < 0);\n#endif\n}\n\n/** @brief Determines if the argument is Not A Number.\n\n @param value The input floating-point value\n\n The function returns 1 if the argument is Not A Number (as defined by IEEE754 standard), 0\n otherwise. */\nCV_INLINE int cvIsNaN( double value )\n{\n    Cv64suf ieee754;\n    ieee754.f = value;\n    return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) +\n           ((unsigned)ieee754.u != 0) > 0x7ff00000;\n}\n\n/** @brief Determines if the argument is Infinity.\n\n @param value The input floating-point value\n\n The function returns 1 if the argument is a plus or minus infinity (as defined by IEEE754 standard)\n and 0 otherwise. 
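\n For example (illustrative):\n @code{.cpp}\n cvIsInf(std::numeric_limits<double>::infinity()); // 1\n cvIsInf(0.0);                                     // 0\n @endcode\n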
*/\nCV_INLINE int cvIsInf( double value )\n{\n    Cv64suf ieee754;\n    ieee754.f = value;\n    return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 &&\n            (unsigned)ieee754.u == 0;\n}\n\n#ifdef __cplusplus\n\n/** @overload */\nCV_INLINE int cvRound(float value)\n{\n#if ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && \\\n      defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)\n    __m128 t = _mm_set_ss( value );\n    return _mm_cvtss_si32(t);\n#elif defined _MSC_VER && defined _M_IX86\n    int t;\n    __asm\n    {\n        fld value;\n        fistp t;\n    }\n    return t;\n#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || \\\n        defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION\n    TEGRA_ROUND_FLT(value);\n#elif defined CV_ICC || defined __GNUC__\n# if CV_VFP\n    ARM_ROUND_FLT(value);\n# else\n    return (int)lrintf(value);\n# endif\n#else\n    /* it's ok if round does not comply with IEEE754 standard;\n     the tests should allow +/-1 difference when the tested functions use round */\n    return (int)(value + (value >= 0 ? 0.5f : -0.5f));\n#endif\n}\n\n/** @overload */\nCV_INLINE int cvRound( int value )\n{\n    return value;\n}\n\n/** @overload */\nCV_INLINE int cvFloor( float value )\n{\n#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)\n    __m128 t = _mm_set_ss( value );\n    int i = _mm_cvtss_si32(t);\n    return i - _mm_movemask_ps(_mm_cmplt_ss(t, _mm_cvtsi32_ss(t,i)));\n#elif defined __GNUC__\n    int i = (int)value;\n    return i - (i > value);\n#else\n    int i = cvRound(value);\n    float diff = (float)(value - i);\n    return i - (diff < 0);\n#endif\n}\n\n/** @overload */\nCV_INLINE int cvFloor( int value )\n{\n    return value;\n}\n\n/** @overload */\nCV_INLINE int cvCeil( float value )\n{\n#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__)) && !defined(__CUDACC__)\n    __m128 t = _mm_set_ss( value );\n    int i = _mm_cvtss_si32(t);\n    return i + _mm_movemask_ps(_mm_cmplt_ss(_mm_cvtsi32_ss(t,i), t));\n#elif defined __GNUC__\n    int i = (int)value;\n    return i + (i < value);\n#else\n    int i = cvRound(value);\n    float diff = (float)(i - value);\n    return i + (diff < 0);\n#endif\n}\n\n/** @overload */\nCV_INLINE int cvCeil( int value )\n{\n    return value;\n}\n\n/** @overload */\nCV_INLINE int cvIsNaN( float value )\n{\n    Cv32suf ieee754;\n    ieee754.f = value;\n    return (ieee754.u & 0x7fffffff) > 0x7f800000;\n}\n\n/** @overload */\nCV_INLINE int cvIsInf( float value )\n{\n    Cv32suf ieee754;\n    ieee754.f = value;\n    return (ieee754.u & 0x7fffffff) == 0x7f800000;\n}\n\n#endif // __cplusplus\n\n//! @} core_utils\n\n#endif\n"
  },
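  {
    "path": "docs/examples/fast_math_example.cpp",
    "content": "// Illustrative sketch (NOT part of the original sources; hypothetical path)\n// showing the semantics of the rounding helpers from fast_math.hpp above:\n// cvFloor(v) <= v < cvFloor(v)+1 and cvCeil(v) >= v > cvCeil(v)-1.\n#include <opencv2/core/fast_math.hpp>\n#include <cstdio>\n\nint main()\n{\n    std::printf(\"%d %d %d\\n\", cvRound(2.6), cvFloor(2.7), cvCeil(2.2)); // 3 2 3\n    std::printf(\"%d %d\\n\", cvFloor(-2.7), cvCeil(-2.2));                // -3 -2\n    return 0;\n}\n"
  },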
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/hal/hal.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HAL_HPP__\n#define __OPENCV_HAL_HPP__\n\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/hal/interface.h\"\n\n//! @cond IGNORED\n#define CALL_HAL(name, fun, ...) \\\n    int res = fun(__VA_ARGS__); \\\n    if (res == CV_HAL_ERROR_OK) \\\n        return; \\\n    else if (res != CV_HAL_ERROR_NOT_IMPLEMENTED) \\\n        CV_Error_(cv::Error::StsInternal, \\\n            (\"HAL implementation \" CVAUX_STR(name) \" ==> \" CVAUX_STR(fun) \" returned %d (0x%08x)\", res, res));\n//! @endcond\n\n\nnamespace cv { namespace hal {\n\n//! @addtogroup core_hal_functions\n//! 
@{\n\nCV_EXPORTS int normHamming(const uchar* a, int n);\nCV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n);\n\nCV_EXPORTS int normHamming(const uchar* a, int n, int cellSize);\nCV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize);\n\nCV_EXPORTS int LU32f(float* A, size_t astep, int m, float* b, size_t bstep, int n);\nCV_EXPORTS int LU64f(double* A, size_t astep, int m, double* b, size_t bstep, int n);\nCV_EXPORTS bool Cholesky32f(float* A, size_t astep, int m, float* b, size_t bstep, int n);\nCV_EXPORTS bool Cholesky64f(double* A, size_t astep, int m, double* b, size_t bstep, int n);\n\nCV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n);\nCV_EXPORTS float normL1_(const float* a, const float* b, int n);\nCV_EXPORTS float normL2Sqr_(const float* a, const float* b, int n);\n\nCV_EXPORTS void exp32f(const float* src, float* dst, int n);\nCV_EXPORTS void exp64f(const double* src, double* dst, int n);\nCV_EXPORTS void log32f(const float* src, float* dst, int n);\nCV_EXPORTS void log64f(const double* src, double* dst, int n);\n\nCV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees);\nCV_EXPORTS void magnitude32f(const float* x, const float* y, float* dst, int n);\nCV_EXPORTS void magnitude64f(const double* x, const double* y, double* dst, int n);\nCV_EXPORTS void sqrt32f(const float* src, float* dst, int len);\nCV_EXPORTS void sqrt64f(const double* src, double* dst, int len);\nCV_EXPORTS void invSqrt32f(const float* src, float* dst, int len);\nCV_EXPORTS void invSqrt64f(const double* src, double* dst, int len);\n\nCV_EXPORTS void split8u(const uchar* src, uchar** dst, int len, int cn );\nCV_EXPORTS void split16u(const ushort* src, ushort** dst, int len, int cn );\nCV_EXPORTS void split32s(const int* src, int** dst, int len, int cn );\nCV_EXPORTS void split64s(const int64* src, int64** dst, int len, int cn );\n\nCV_EXPORTS void merge8u(const uchar** src, uchar* dst, int len, int cn );\nCV_EXPORTS void merge16u(const ushort** src, ushort* dst, int len, int cn );\nCV_EXPORTS void merge32s(const int** src, int* dst, int len, int cn );\nCV_EXPORTS void merge64s(const int64** src, int64* dst, int len, int cn );\n\nCV_EXPORTS void add8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void add8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void add16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void add16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void add32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void add32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void add64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* );\n\nCV_EXPORTS void sub8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void sub8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int 
height, void* );\nCV_EXPORTS void sub16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void sub16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void sub32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void sub32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void sub64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* );\n\nCV_EXPORTS void max8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void max8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void max16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void max16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void max32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void max32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void max64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* );\n\nCV_EXPORTS void min8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void min8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void min16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void min16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void min32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void min32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void min64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* );\n\nCV_EXPORTS void absdiff8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void absdiff8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void absdiff16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void absdiff16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void absdiff32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, 
int width, int height, void* );\nCV_EXPORTS void absdiff32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void absdiff64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* );\n\nCV_EXPORTS void and8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void or8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void xor8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\nCV_EXPORTS void not8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* );\n\nCV_EXPORTS void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop);\nCV_EXPORTS void cmp8s(const schar* src1, size_t step1, const schar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop);\nCV_EXPORTS void cmp16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop);\nCV_EXPORTS void cmp16s(const short* src1, size_t step1, const short* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop);\nCV_EXPORTS void cmp32s(const int* src1, size_t step1, const int* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop);\nCV_EXPORTS void cmp32f(const float* src1, size_t step1, const float* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop);\nCV_EXPORTS void cmp64f(const double* src1, size_t step1, const double* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop);\n\nCV_EXPORTS void mul8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void mul8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void mul16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void mul16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void mul32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void mul32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void mul64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scale);\n\nCV_EXPORTS void div8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void div8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void div16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void 
div16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void div32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void div32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void div64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scale);\n\nCV_EXPORTS void recip8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void recip8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void recip16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void recip16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void recip32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void recip32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scale);\nCV_EXPORTS void recip64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scale);\n\nCV_EXPORTS void addWeighted8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _scalars );\nCV_EXPORTS void addWeighted8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scalars );\nCV_EXPORTS void addWeighted16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scalars );\nCV_EXPORTS void addWeighted16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scalars );\nCV_EXPORTS void addWeighted32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scalars );\nCV_EXPORTS void addWeighted32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scalars );\nCV_EXPORTS void addWeighted64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scalars );\n\n//! @} core_hal\n\n//=============================================================================\n// for binary compatibility with 3.0\n\n//! 
@cond IGNORED\n\nCV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);\nCV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);\nCV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);\nCV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);\n\nCV_EXPORTS void exp(const float* src, float* dst, int n);\nCV_EXPORTS void exp(const double* src, double* dst, int n);\nCV_EXPORTS void log(const float* src, float* dst, int n);\nCV_EXPORTS void log(const double* src, double* dst, int n);\n\nCV_EXPORTS void magnitude(const float* x, const float* y, float* dst, int n);\nCV_EXPORTS void magnitude(const double* x, const double* y, double* dst, int n);\nCV_EXPORTS void sqrt(const float* src, float* dst, int len);\nCV_EXPORTS void sqrt(const double* src, double* dst, int len);\nCV_EXPORTS void invSqrt(const float* src, float* dst, int len);\nCV_EXPORTS void invSqrt(const double* src, double* dst, int len);\n\n//! @endcond\n\n}} //cv::hal\n\n#endif //__OPENCV_HAL_HPP__\n"
  },
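  {
    "path": "docs/examples/hal_example.cpp",
    "content": "// Illustrative sketch (NOT part of the original sources; hypothetical path)\n// calling two of the low-level routines declared in cv::hal above. It assumes\n// the program is linked against an OpenCV core library that implements them.\n#include <opencv2/core/hal/hal.hpp>\n#include <cstdio>\n\nint main()\n{\n    // Hamming distance between two 8-byte patterns: 0xFF and 0x00 differ in 8 bits.\n    const uchar a[8] = { 0xFF, 0, 0, 0, 0, 0, 0, 0 };\n    const uchar b[8] = { 0x00, 0, 0, 0, 0, 0, 0, 0 };\n    std::printf(\"%d\\n\", cv::hal::normHamming(a, b, 8)); // prints 8\n\n    // Element-wise exponential of a small float buffer.\n    float src[4] = { 0.f, 1.f, 2.f, 3.f }, dst[4];\n    cv::hal::exp32f(src, dst, 4);\n    std::printf(\"%f\\n\", dst[1]);                        // ~2.718282\n    return 0;\n}\n"
  },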
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/hal/interface.h",
    "content": "#ifndef _HAL_INTERFACE_HPP_INCLUDED_\n#define _HAL_INTERFACE_HPP_INCLUDED_\n\n//! @addtogroup core_hal_interface\n//! @{\n\n#define CV_HAL_ERROR_OK 0\n#define CV_HAL_ERROR_NOT_IMPLEMENTED 1\n#define CV_HAL_ERROR_UNKNOWN -1\n\n#define CV_HAL_CMP_EQ 0\n#define CV_HAL_CMP_GT 1\n#define CV_HAL_CMP_GE 2\n#define CV_HAL_CMP_LT 3\n#define CV_HAL_CMP_LE 4\n#define CV_HAL_CMP_NE 5\n\n#ifdef __cplusplus\n#include <cstddef>\n#else\n#include <stddef.h>\n#endif\n\n/* primitive types */\n/*\n  schar  - signed 1 byte integer\n  uchar  - unsigned 1 byte integer\n  short  - signed 2 byte integer\n  ushort - unsigned 2 byte integer\n  int    - signed 4 byte integer\n  uint   - unsigned 4 byte integer\n  int64  - signed 8 byte integer\n  uint64 - unsigned 8 byte integer\n*/\n\n#if !defined _MSC_VER && !defined __BORLANDC__\n#  if defined __cplusplus && __cplusplus >= 201103L && !defined __APPLE__\n#    include <cstdint>\n     typedef std::uint32_t uint;\n#  else\n#    include <stdint.h>\n     typedef uint32_t uint;\n#  endif\n#else\n   typedef unsigned uint;\n#endif\n\ntypedef signed char schar;\n\n#ifndef __IPL_H__\n   typedef unsigned char uchar;\n   typedef unsigned short ushort;\n#endif\n\n#if defined _MSC_VER || defined __BORLANDC__\n   typedef __int64 int64;\n   typedef unsigned __int64 uint64;\n#  define CV_BIG_INT(n)   n##I64\n#  define CV_BIG_UINT(n)  n##UI64\n#else\n   typedef int64_t int64;\n   typedef uint64_t uint64;\n#  define CV_BIG_INT(n)   n##LL\n#  define CV_BIG_UINT(n)  n##ULL\n#endif\n\n//! @}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/hal/intrin.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HAL_INTRIN_HPP__\n#define __OPENCV_HAL_INTRIN_HPP__\n\n#include <cmath>\n#include <float.h>\n#include <stdlib.h>\n#include \"opencv2/core/cvdef.h\"\n\n#define OPENCV_HAL_ADD(a, b) ((a) + (b))\n#define OPENCV_HAL_AND(a, b) ((a) & (b))\n#define OPENCV_HAL_NOP(a) (a)\n#define OPENCV_HAL_1ST(a, b) (a)\n\n// unlike HAL API, which is in cv::hal,\n// we put intrinsics into cv namespace to make its\n// access from within opencv code more accessible\nnamespace cv {\n\n//! @addtogroup core_hal_intrin\n//! @{\n\n//! 
@cond IGNORED\ntemplate<typename _Tp> struct V_TypeTraits\n{\n    typedef _Tp int_type;\n    typedef _Tp uint_type;\n    typedef _Tp abs_type;\n    typedef _Tp sum_type;\n\n    enum { delta = 0, shift = 0 };\n\n    static int_type reinterpret_int(_Tp x) { return x; }\n    static uint_type reinterpet_uint(_Tp x) { return x; }\n    static _Tp reinterpret_from_int(int_type x) { return (_Tp)x; }\n};\n\ntemplate<> struct V_TypeTraits<uchar>\n{\n    typedef uchar value_type;\n    typedef schar int_type;\n    typedef uchar uint_type;\n    typedef uchar abs_type;\n    typedef int sum_type;\n\n    typedef ushort w_type;\n    typedef unsigned q_type;\n\n    enum { delta = 128, shift = 8 };\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return (value_type)x; }\n};\n\ntemplate<> struct V_TypeTraits<schar>\n{\n    typedef schar value_type;\n    typedef schar int_type;\n    typedef uchar uint_type;\n    typedef uchar abs_type;\n    typedef int sum_type;\n\n    typedef short w_type;\n    typedef int q_type;\n\n    enum { delta = 128, shift = 8 };\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return (value_type)x; }\n};\n\ntemplate<> struct V_TypeTraits<ushort>\n{\n    typedef ushort value_type;\n    typedef short int_type;\n    typedef ushort uint_type;\n    typedef ushort abs_type;\n    typedef int sum_type;\n\n    typedef unsigned w_type;\n    typedef uchar nu_type;\n\n    enum { delta = 32768, shift = 16 };\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return (value_type)x; }\n};\n\ntemplate<> struct V_TypeTraits<short>\n{\n    typedef short value_type;\n    typedef short int_type;\n    typedef ushort uint_type;\n    typedef ushort abs_type;\n    typedef int sum_type;\n\n    typedef int w_type;\n    typedef uchar nu_type;\n    typedef schar n_type;\n\n    enum { delta = 128, shift = 8 };\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return (value_type)x; }\n};\n\ntemplate<> struct V_TypeTraits<unsigned>\n{\n    typedef unsigned value_type;\n    typedef int int_type;\n    typedef unsigned uint_type;\n    typedef unsigned abs_type;\n    typedef unsigned sum_type;\n\n    typedef uint64 w_type;\n    typedef ushort nu_type;\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return (value_type)x; }\n};\n\ntemplate<> struct V_TypeTraits<int>\n{\n    typedef int value_type;\n    typedef int int_type;\n    typedef unsigned uint_type;\n    typedef unsigned abs_type;\n    typedef int sum_type;\n\n    typedef int64 w_type;\n    typedef short n_type;\n    typedef ushort nu_type;\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return 
(value_type)x; }\n};\n\ntemplate<> struct V_TypeTraits<uint64>\n{\n    typedef uint64 value_type;\n    typedef int64 int_type;\n    typedef uint64 uint_type;\n    typedef uint64 abs_type;\n    typedef uint64 sum_type;\n\n    typedef unsigned nu_type;\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return (value_type)x; }\n};\n\ntemplate<> struct V_TypeTraits<int64>\n{\n    typedef int64 value_type;\n    typedef int64 int_type;\n    typedef uint64 uint_type;\n    typedef uint64 abs_type;\n    typedef int64 sum_type;\n\n    typedef int nu_type;\n\n    static int_type reinterpret_int(value_type x) { return (int_type)x; }\n    static uint_type reinterpret_uint(value_type x) { return (uint_type)x; }\n    static value_type reinterpret_from_int(int_type x) { return (value_type)x; }\n};\n\n\ntemplate<> struct V_TypeTraits<float>\n{\n    typedef float value_type;\n    typedef int int_type;\n    typedef unsigned uint_type;\n    typedef float abs_type;\n    typedef float sum_type;\n\n    typedef double w_type;\n\n    static int_type reinterpret_int(value_type x)\n    {\n        Cv32suf u;\n        u.f = x;\n        return u.i;\n    }\n    static uint_type reinterpet_uint(value_type x)\n    {\n        Cv32suf u;\n        u.f = x;\n        return u.u;\n    }\n    static value_type reinterpret_from_int(int_type x)\n    {\n        Cv32suf u;\n        u.i = x;\n        return u.f;\n    }\n};\n\ntemplate<> struct V_TypeTraits<double>\n{\n    typedef double value_type;\n    typedef int64 int_type;\n    typedef uint64 uint_type;\n    typedef double abs_type;\n    typedef double sum_type;\n    static int_type reinterpret_int(value_type x)\n    {\n        Cv64suf u;\n        u.f = x;\n        return u.i;\n    }\n    static uint_type reinterpet_uint(value_type x)\n    {\n        Cv64suf u;\n        u.f = x;\n        return u.u;\n    }\n    static value_type reinterpret_from_int(int_type x)\n    {\n        Cv64suf u;\n        u.i = x;\n        return u.f;\n    }\n};\n\ntemplate <typename T> struct V_SIMD128Traits\n{\n    enum { nlanes = 16 / sizeof(T) };\n};\n\n//! @endcond\n\n//! @}\n\n}\n\n#ifdef CV_DOXYGEN\n#   undef CV_SSE2\n#   undef CV_NEON\n#endif\n\n#if CV_SSE2\n\n#include \"opencv2/core/hal/intrin_sse.hpp\"\n\n#elif CV_NEON\n\n#include \"opencv2/core/hal/intrin_neon.hpp\"\n\n#else\n\n#include \"opencv2/core/hal/intrin_cpp.hpp\"\n\n#endif\n\n//! @addtogroup core_hal_intrin\n//! @{\n\n#ifndef CV_SIMD128\n//! Set to 1 if current compiler supports vector extensions (NEON or SSE is enabled)\n#define CV_SIMD128 0\n#endif\n\n#ifndef CV_SIMD128_64F\n//! Set to 1 if current intrinsics implementation supports 64-bit float vectors\n#define CV_SIMD128_64F 0\n#endif\n\n//! @}\n\n#endif\n"
  },
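  {
    "path": "docs/examples/intrin_traits_example.cpp",
    "content": "// Illustrative sketch (NOT part of the original sources; hypothetical path):\n// cv::V_TypeTraits from intrin.hpp reinterprets a float's bit pattern as an\n// integer without any numeric conversion.\n#include <opencv2/core/hal/intrin.hpp>\n#include <cstdio>\n\nint main()\n{\n    int bits = cv::V_TypeTraits<float>::reinterpret_int(1.0f);\n    std::printf(\"0x%08x\\n\", bits); // prints 0x3f800000, the IEEE754 pattern of 1.0f\n    return 0;\n}\n"
  },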
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/hal/intrin_cpp.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HAL_INTRIN_CPP_HPP__\n#define __OPENCV_HAL_INTRIN_CPP_HPP__\n\n#include <limits>\n#include <cstring>\n#include <algorithm>\n#include \"opencv2/core/saturate.hpp\"\n\nnamespace cv\n{\n\n/** @addtogroup core_hal_intrin\n\n\"Universal intrinsics\" is a types and functions set intended to simplify vectorization of code on\ndifferent platforms. Currently there are two supported SIMD extensions: __SSE/SSE2__ on x86\narchitectures and __NEON__ on ARM architectures, both allow working with 128 bit registers\ncontaining packed values of different types. 
For example, a single cv::v_float32x4 value packs four float lanes, so adding two such registers processes all four lanes at once.\n\nIf no SIMD extension is available\nduring compilation, the fallback C++ implementation of the intrinsics is chosen and the code works as\nexpected, although it may be slower.\n\n### Types\n\nThere are several types representing a 128-bit register as a vector of packed values; each type is\nimplemented as a structure based on one SIMD register.\n\n- cv::v_uint8x16 and cv::v_int8x16: sixteen 8-bit integer values (unsigned/signed) - char\n- cv::v_uint16x8 and cv::v_int16x8: eight 16-bit integer values (unsigned/signed) - short\n- cv::v_uint32x4 and cv::v_int32x4: four 32-bit integer values (unsigned/signed) - int\n- cv::v_uint64x2 and cv::v_int64x2: two 64-bit integer values (unsigned/signed) - int64\n- cv::v_float32x4: four 32-bit floating point values (signed) - float\n- cv::v_float64x2: two 64-bit floating point values (signed) - double\n\n@note\ncv::v_float64x2 is not implemented in the NEON variant; if you want to use this type, don't forget to\ncheck the CV_SIMD128_64F preprocessor definition:\n@code\n#if CV_SIMD128_64F\n//...\n#endif\n@endcode\n\n### Load and store operations\n\nThese operations allow setting the contents of a register explicitly, loading them from a memory\nblock, and saving the contents of a register to a memory block.\n\n- Constructors:\n@ref v_reg::v_reg(const _Tp *ptr) \"from memory\",\n@ref v_reg::v_reg(_Tp s0, _Tp s1) \"from two values\", ...\n- Other create methods:\n@ref v_setall_s8, @ref v_setall_u8, ...,\n@ref v_setzero_u8, @ref v_setzero_s8, ...\n- Memory operations:\n@ref v_load, @ref v_load_aligned, @ref v_load_halves,\n@ref v_store, @ref v_store_aligned,\n@ref v_store_high, @ref v_store_low\n\n### Value reordering\n\nThese operations allow reordering or recombining elements in one or multiple vectors.\n\n- Interleave, deinterleave (3 and 4 channels): @ref v_load_deinterleave, @ref v_store_interleave\n- Expand: @ref v_load_expand, @ref v_load_expand_q, @ref v_expand\n- Pack: @ref v_pack, @ref v_pack_u, @ref v_rshr_pack, @ref v_rshr_pack_u,\n@ref v_pack_store, @ref v_pack_u_store, @ref v_rshr_pack_store, @ref v_rshr_pack_u_store\n- Recombine: @ref v_zip, @ref v_recombine, @ref v_combine_low, @ref v_combine_high\n- Extract: @ref v_extract\n\n\n### Arithmetic, bitwise and comparison operations\n\nElement-wise binary and unary operations.\n\n- Arithmetics:\n@ref operator+(const v_reg &a, const v_reg &b) \"+\",\n@ref operator-(const v_reg &a, const v_reg &b) \"-\",\n@ref operator*(const v_reg &a, const v_reg &b) \"*\",\n@ref operator/(const v_reg &a, const v_reg &b) \"/\",\n@ref v_mul_expand\n\n- Non-saturating arithmetics: @ref v_add_wrap, @ref v_sub_wrap\n\n- Bitwise shifts:\n@ref operator<<(const v_reg &a, int s) \"<<\",\n@ref operator>>(const v_reg &a, int s) \">>\",\n@ref v_shl, @ref v_shr\n\n- Bitwise logic:\n@ref operator&(const v_reg &a, const v_reg &b) \"&\",\n@ref operator|(const v_reg &a, const v_reg &b) \"|\",\n@ref operator^(const v_reg &a, const v_reg &b) \"^\",\n@ref operator~(const v_reg &a) \"~\"\n\n- Comparison:\n@ref operator>(const v_reg &a, const v_reg &b) \">\",\n@ref operator>=(const v_reg &a, const v_reg &b) \">=\",\n@ref operator<(const v_reg &a, const v_reg &b) \"<\",\n@ref operator<=(const v_reg &a, const v_reg &b) \"<=\",\n@ref operator==(const v_reg &a, const v_reg &b) \"==\",\n@ref operator!=(const v_reg &a, const v_reg &b) \"!=\"\n\n- min/max: @ref v_min, @ref v_max\n\n### Reduce and mask\n\nMost of these operations return only one value.\n\n- Reduce: @ref v_reduce_min, @ref v_reduce_max, @ref v_reduce_sum\n- Mask: @ref v_signmask, @ref v_check_all, @ref v_check_any, @ref v_select\n\n### Other math\n\n- Some frequent operations: @ref v_sqrt, @ref v_invsqrt, @ref v_magnitude, @ref v_sqr_magnitude\n- Absolute values: @ref v_abs, @ref v_absdiff\n\n### Conversions\n\nDifferent type conversions and casts:\n\n- Rounding: @ref v_round, @ref v_floor, @ref v_ceil, @ref v_trunc,\n- To float: @ref v_cvt_f32, @ref v_cvt_f64\n- Reinterpret: @ref v_reinterpret_as_u8, @ref v_reinterpret_as_s8, ...\n\n### Matrix operations\n\nIn these operations vectors represent matrix rows/columns: @ref v_dotprod, @ref v_matmul, @ref v_transpose4x4\n\n### Usability\n\nMost operations are implemented only for some subset of the available types; the following matrices\nshow the applicability of different operations to the types.\n\nRegular integers:\n\n| Operations\\Types | uint 8x16 | int 8x16 | uint 16x8 | int 16x8 | uint 32x4 | int 32x4 |\n|-------------------|:-:|:-:|:-:|:-:|:-:|:-:|\n|load, store        | x | x | x | x | x | x |\n|interleave         | x | x | x | x | x | x |\n|expand             | x | x | x | x | x | x |\n|expand_q           | x | x |   |   |   |   |\n|add, sub           | x | x | x | x | x | x |\n|add_wrap, sub_wrap | x | x | x | x |   |   |\n|mul                |   |   | x | x | x | x |\n|mul_expand         |   |   | x | x | x |   |\n|compare            | x | x | x | x | x | x |\n|shift              |   |   | x | x | x | x |\n|dotprod            |   |   |   | x |   |   |\n|logical            | x | x | x | x | x | x |\n|min, max           | x | x | x | x | x | x |\n|absdiff            | x | x | x | x | x | x |\n|reduce             |   |   |   |   | x | x |\n|mask               | x | x | x | x | x | x |\n|pack               | x | x | x | x | x | x |\n|pack_u             | x |   | x |   |   |   |\n|unpack             | x | x | x | x | x | x |\n|extract            | x | x | x | x | x | x |\n|cvt_flt32          |   |   |   |   |   | x |\n|cvt_flt64          |   |   |   |   |   | x |\n|transpose4x4       |   |   |   |   | x | x |\n\nBig integers:\n\n| Operations\\Types | uint 64x2 | int 64x2 |\n|-------------------|:-:|:-:|\n|load, store        | x | x |\n|add, sub           | x | x |\n|shift              | x | x |\n|logical            | x | x |\n|extract            | x | x |\n\nFloating point:\n\n| Operations\\Types | float 32x4 | float 64x2 |\n|-------------------|:-:|:-:|\n|load, store        | x | x |\n|interleave         | x |   |\n|add, sub           | x | x |\n|mul                | x | x |\n|div                | x | x |\n|compare            | x | x |\n|min, max           | x | x |\n|absdiff            | x | x |\n|reduce             | x |   |\n|mask               | x | x |\n|unpack             | x | x |\n|cvt_flt32          |   | x |\n|cvt_flt64          | x |   |\n|sqrt, abs          | x | x |\n|float math         | x | x |\n|transpose4x4       | x |   |\n\n\n @{ */\n\ntemplate<typename _Tp, int n> struct v_reg\n{\n//! 
@cond IGNORED\n    typedef _Tp lane_type;\n    typedef v_reg<typename V_TypeTraits<_Tp>::int_type, n> int_vec;\n    typedef v_reg<typename V_TypeTraits<_Tp>::abs_type, n> abs_vec;\n    enum { nlanes = n };\n// !@endcond\n\n    /** @brief Constructor\n\n    Initializes register with data from memory\n    @param ptr pointer to memory block with data for register */\n    explicit v_reg(const _Tp* ptr) { for( int i = 0; i < n; i++ ) s[i] = ptr[i]; }\n\n    /** @brief Constructor\n\n    Initializes register with two 64-bit values */\n    v_reg(_Tp s0, _Tp s1) { s[0] = s0; s[1] = s1; }\n\n    /** @brief Constructor\n\n    Initializes register with four 32-bit values */\n    v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3) { s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3; }\n\n    /** @brief Constructor\n\n    Initializes register with eight 16-bit values */\n    v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3,\n           _Tp s4, _Tp s5, _Tp s6, _Tp s7)\n    {\n        s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3;\n        s[4] = s4; s[5] = s5; s[6] = s6; s[7] = s7;\n    }\n\n    /** @brief Constructor\n\n    Initializes register with sixteen 8-bit values */\n    v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3,\n           _Tp s4, _Tp s5, _Tp s6, _Tp s7,\n           _Tp s8, _Tp s9, _Tp s10, _Tp s11,\n           _Tp s12, _Tp s13, _Tp s14, _Tp s15)\n    {\n        s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3;\n        s[4] = s4; s[5] = s5; s[6] = s6; s[7] = s7;\n        s[8] = s8; s[9] = s9; s[10] = s10; s[11] = s11;\n        s[12] = s12; s[13] = s13; s[14] = s14; s[15] = s15;\n    }\n\n    /** @brief Default constructor\n\n    Does not initialize anything*/\n    v_reg() {}\n\n    /** @brief Copy constructor */\n    v_reg(const v_reg<_Tp, n> & r)\n    {\n        for( int i = 0; i < n; i++ )\n            s[i] = r.s[i];\n    }\n    /** @brief Access first value\n\n    Returns value of the first lane according to register type, for example:\n    @code{.cpp}\n    v_int32x4 r(1, 2, 3, 4);\n    int v = r.get0(); // returns 1\n    v_uint64x2 r(1, 2);\n    uint64_t v = r.get0(); // returns 1\n    @endcode\n    */\n    _Tp get0() const { return s[0]; }\n\n//! @cond IGNORED\n    _Tp get(const int i) const { return s[i]; }\n    v_reg<_Tp, n> high() const\n    {\n        v_reg<_Tp, n> c;\n        int i;\n        for( i = 0; i < n/2; i++ )\n        {\n            c.s[i] = s[i+(n/2)];\n            c.s[i+(n/2)] = 0;\n        }\n        return c;\n    }\n\n    static v_reg<_Tp, n> zero()\n    {\n        v_reg<_Tp, n> c;\n        for( int i = 0; i < n; i++ )\n            c.s[i] = (_Tp)0;\n        return c;\n    }\n\n    static v_reg<_Tp, n> all(_Tp s)\n    {\n        v_reg<_Tp, n> c;\n        for( int i = 0; i < n; i++ )\n            c.s[i] = s;\n        return c;\n    }\n\n    template<typename _Tp2, int n2> v_reg<_Tp2, n2> reinterpret_as() const\n    {\n        size_t bytes = std::min(sizeof(_Tp2)*n2, sizeof(_Tp)*n);\n        v_reg<_Tp2, n2> c;\n        std::memcpy(&c.s[0], &s[0], bytes);\n        return c;\n    }\n\n    _Tp s[n];\n//! 
@endcond\n};\n\n/** @brief Sixteen 8-bit unsigned integer values */\ntypedef v_reg<uchar, 16> v_uint8x16;\n/** @brief Sixteen 8-bit signed integer values */\ntypedef v_reg<schar, 16> v_int8x16;\n/** @brief Eight 16-bit unsigned integer values */\ntypedef v_reg<ushort, 8> v_uint16x8;\n/** @brief Eight 16-bit signed integer values */\ntypedef v_reg<short, 8> v_int16x8;\n/** @brief Four 32-bit unsigned integer values */\ntypedef v_reg<unsigned, 4> v_uint32x4;\n/** @brief Four 32-bit signed integer values */\ntypedef v_reg<int, 4> v_int32x4;\n/** @brief Four 32-bit floating point values (single precision) */\ntypedef v_reg<float, 4> v_float32x4;\n/** @brief Two 64-bit floating point values (double precision) */\ntypedef v_reg<double, 2> v_float64x2;\n/** @brief Two 64-bit unsigned integer values */\ntypedef v_reg<uint64, 2> v_uint64x2;\n/** @brief Two 64-bit signed integer values */\ntypedef v_reg<int64, 2> v_int64x2;\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_BIN_OP(bin_op) \\\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n> \\\n    operator bin_op (const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \\\n{ \\\n    v_reg<_Tp, n> c; \\\n    for( int i = 0; i < n; i++ ) \\\n        c.s[i] = saturate_cast<_Tp>(a.s[i] bin_op b.s[i]); \\\n    return c; \\\n} \\\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n>& \\\n    operator bin_op##= (v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \\\n{ \\\n    for( int i = 0; i < n; i++ ) \\\n        a.s[i] = saturate_cast<_Tp>(a.s[i] bin_op b.s[i]); \\\n    return a; \\\n}\n\n/** @brief Add values\n\nFor all types. */\nOPENCV_HAL_IMPL_BIN_OP(+)\n\n/** @brief Subtract values\n\nFor all types. */\nOPENCV_HAL_IMPL_BIN_OP(-)\n\n/** @brief Multiply values\n\nFor 16- and 32-bit integer types and floating types. */\nOPENCV_HAL_IMPL_BIN_OP(*)\n\n/** @brief Divide values\n\nFor floating types only. */\nOPENCV_HAL_IMPL_BIN_OP(/)\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_BIT_OP(bit_op) \\\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n> operator bit_op \\\n    (const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \\\n{ \\\n    v_reg<_Tp, n> c; \\\n    typedef typename V_TypeTraits<_Tp>::int_type itype; \\\n    for( int i = 0; i < n; i++ ) \\\n        c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)(V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) bit_op \\\n                                                        V_TypeTraits<_Tp>::reinterpret_int(b.s[i]))); \\\n    return c; \\\n} \\\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n>& operator \\\n    bit_op##= (v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \\\n{ \\\n    typedef typename V_TypeTraits<_Tp>::int_type itype; \\\n    for( int i = 0; i < n; i++ ) \\\n        a.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)(V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) bit_op \\\n                                                        V_TypeTraits<_Tp>::reinterpret_int(b.s[i]))); \\\n    return a; \\\n}\n\n/** @brief Bitwise AND\n\nOnly for integer types. */\nOPENCV_HAL_IMPL_BIT_OP(&)\n\n/** @brief Bitwise OR\n\nOnly for integer types. 
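For example (illustrative):\n@code{.cpp}\nv_uint8x16 a = v_setall_u8(0xF0), b = v_setall_u8(0x0F);\nv_uint8x16 c = a | b; // every lane is 0xFF\n@endcode\n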
*/\nOPENCV_HAL_IMPL_BIT_OP(|)\n\n/** @brief Bitwise XOR\n\nOnly for integer types.*/\nOPENCV_HAL_IMPL_BIT_OP(^)\n\n/** @brief Bitwise NOT\n\nOnly for integer types.*/\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n> operator ~ (const v_reg<_Tp, n>& a)\n{\n    v_reg<_Tp, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int(~V_TypeTraits<_Tp>::reinterpret_int(a.s[i]));\n    return c;\n}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_MATH_FUNC(func, cfunc, _Tp2) \\\ntemplate<typename _Tp, int n> inline v_reg<_Tp2, n> func(const v_reg<_Tp, n>& a) \\\n{ \\\n    v_reg<_Tp2, n> c; \\\n    for( int i = 0; i < n; i++ ) \\\n        c.s[i] = cfunc(a.s[i]); \\\n    return c; \\\n}\n\n/** @brief Square root of elements\n\nOnly for floating point types.*/\nOPENCV_HAL_IMPL_MATH_FUNC(v_sqrt, std::sqrt, _Tp)\n\n//! @cond IGNORED\nOPENCV_HAL_IMPL_MATH_FUNC(v_sin, std::sin, _Tp)\nOPENCV_HAL_IMPL_MATH_FUNC(v_cos, std::cos, _Tp)\nOPENCV_HAL_IMPL_MATH_FUNC(v_exp, std::exp, _Tp)\nOPENCV_HAL_IMPL_MATH_FUNC(v_log, std::log, _Tp)\n//! @endcond\n\n/** @brief Absolute value of elements\n\nOnly for floating point types.*/\nOPENCV_HAL_IMPL_MATH_FUNC(v_abs, (typename V_TypeTraits<_Tp>::abs_type)std::abs,\n                          typename V_TypeTraits<_Tp>::abs_type)\n\n/** @brief Round elements\n\nOnly for floating point types.*/\nOPENCV_HAL_IMPL_MATH_FUNC(v_round, cvRound, int)\n\n/** @brief Floor elements\n\nOnly for floating point types.*/\nOPENCV_HAL_IMPL_MATH_FUNC(v_floor, cvFloor, int)\n\n/** @brief Ceil elements\n\nOnly for floating point types.*/\nOPENCV_HAL_IMPL_MATH_FUNC(v_ceil, cvCeil, int)\n\n/** @brief Truncate elements\n\nOnly for floating point types.*/\nOPENCV_HAL_IMPL_MATH_FUNC(v_trunc, int, int)\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_MINMAX_FUNC(func, cfunc) \\\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n> func(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \\\n{ \\\n    v_reg<_Tp, n> c; \\\n    for( int i = 0; i < n; i++ ) \\\n        c.s[i] = cfunc(a.s[i], b.s[i]); \\\n    return c; \\\n}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(func, cfunc) \\\ntemplate<typename _Tp, int n> inline _Tp func(const v_reg<_Tp, n>& a) \\\n{ \\\n    _Tp c = a.s[0]; \\\n    for( int i = 1; i < n; i++ ) \\\n        c = cfunc(c, a.s[i]); \\\n    return c; \\\n}\n\n/** @brief Choose min values for each pair\n\nScheme:\n@code\n{A1 A2 ...}\n{B1 B2 ...}\n--------------\n{min(A1,B1) min(A2,B2) ...}\n@endcode\nFor all types except 64-bit integer. */\nOPENCV_HAL_IMPL_MINMAX_FUNC(v_min, std::min)\n\n/** @brief Choose max values for each pair\n\nScheme:\n@code\n{A1 A2 ...}\n{B1 B2 ...}\n--------------\n{max(A1,B1) max(A2,B2) ...}\n@endcode\nFor all types except 64-bit integer. */\nOPENCV_HAL_IMPL_MINMAX_FUNC(v_max, std::max)\n\n/** @brief Find one min value\n\nScheme:\n@code\n{A1 A2 A3 ...} => min(A1,A2,A3,...)\n@endcode\nFor 32-bit integer and 32-bit floating point types. */\nOPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(v_reduce_min, std::min)\n\n/** @brief Find one max value\n\nScheme:\n@code\n{A1 A2 A3 ...} => max(A1,A2,A3,...)\n@endcode\nFor 32-bit integer and 32-bit floating point types. */\nOPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(v_reduce_max, std::max)\n\n//! 
@cond IGNORED\ntemplate<typename _Tp, int n>\ninline void v_minmax( const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,\n                      v_reg<_Tp, n>& minval, v_reg<_Tp, n>& maxval )\n{\n    for( int i = 0; i < n; i++ )\n    {\n        minval.s[i] = std::min(a.s[i], b.s[i]);\n        maxval.s[i] = std::max(a.s[i], b.s[i]);\n    }\n}\n//! @endcond\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_CMP_OP(cmp_op) \\\ntemplate<typename _Tp, int n> \\\ninline v_reg<_Tp, n> operator cmp_op(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \\\n{ \\\n    typedef typename V_TypeTraits<_Tp>::int_type itype; \\\n    v_reg<_Tp, n> c; \\\n    for( int i = 0; i < n; i++ ) \\\n        c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)-(int)(a.s[i] cmp_op b.s[i])); \\\n    return c; \\\n}\n\n/** @brief Less-than comparison\n\nFor all types except 64-bit integer values. */\nOPENCV_HAL_IMPL_CMP_OP(<)\n\n/** @brief Greater-than comparison\n\nFor all types except 64-bit integer values. */\nOPENCV_HAL_IMPL_CMP_OP(>)\n\n/** @brief Less-than or equal comparison\n\nFor all types except 64-bit integer values. */\nOPENCV_HAL_IMPL_CMP_OP(<=)\n\n/** @brief Greater-than or equal comparison\n\nFor all types except 64-bit integer values. */\nOPENCV_HAL_IMPL_CMP_OP(>=)\n\n/** @brief Equal comparison\n\nFor all types except 64-bit integer values. */\nOPENCV_HAL_IMPL_CMP_OP(==)\n\n/** @brief Not equal comparison\n\nFor all types except 64-bit integer values. */\nOPENCV_HAL_IMPL_CMP_OP(!=)\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_ADD_SUB_OP(func, bin_op, cast_op, _Tp2) \\\ntemplate<typename _Tp, int n> \\\ninline v_reg<_Tp2, n> func(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \\\n{ \\\n    typedef _Tp2 rtype; \\\n    v_reg<rtype, n> c; \\\n    for( int i = 0; i < n; i++ ) \\\n        c.s[i] = cast_op(a.s[i] bin_op b.s[i]); \\\n    return c; \\\n}\n\n/** @brief Add values without saturation\n\nFor 8- and 16-bit integer values. */\nOPENCV_HAL_IMPL_ADD_SUB_OP(v_add_wrap, +, (_Tp), _Tp)\n\n/** @brief Subtract values without saturation\n\nFor 8- and 16-bit integer values. */\nOPENCV_HAL_IMPL_ADD_SUB_OP(v_sub_wrap, -, (_Tp), _Tp)\n\n//! @cond IGNORED\ntemplate<typename T> inline T _absdiff(T a, T b)\n{\n    return a > b ? a - b : b - a;\n}\n//! @endcond\n\n/** @brief Absolute difference\n\nReturns \\f$ |a - b| \\f$ converted to corresponding unsigned type.\nExample:\n@code{.cpp}\nv_int32x4 a, b; // {1, 2, 3, 4} and {4, 3, 2, 1}\nv_uint32x4 c = v_absdiff(a, b); // result is {3, 1, 1, 3}\n@endcode\nFor 8-, 16-, 32-bit integer source types. */\ntemplate<typename _Tp, int n>\ninline v_reg<typename V_TypeTraits<_Tp>::abs_type, n> v_absdiff(const v_reg<_Tp, n>& a, const v_reg<_Tp, n> & b)\n{\n    typedef typename V_TypeTraits<_Tp>::abs_type rtype;\n    v_reg<rtype, n> c;\n    const rtype mask = std::numeric_limits<_Tp>::is_signed ? 
(1 << (sizeof(rtype)*8 - 1)) : 0;\n    for( int i = 0; i < n; i++ )\n    {\n        rtype ua = a.s[i] ^ mask;\n        rtype ub = b.s[i] ^ mask;\n        c.s[i] = _absdiff(ua, ub);\n    }\n    return c;\n}\n\n/** @overload\n\nFor 32-bit floating point values */\ninline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b)\n{\n    v_float32x4 c;\n    for( int i = 0; i < c.nlanes; i++ )\n        c.s[i] = _absdiff(a.s[i], b.s[i]);\n    return c;\n}\n\n/** @overload\n\nFor 64-bit floating point values */\ninline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b)\n{\n    v_float64x2 c;\n    for( int i = 0; i < c.nlanes; i++ )\n        c.s[i] = _absdiff(a.s[i], b.s[i]);\n    return c;\n}\n\n/** @brief Inverse square root\n\nReturns \f$ 1/sqrt(a) \f$\nFor floating point types only. */\ntemplate<typename _Tp, int n>\ninline v_reg<_Tp, n> v_invsqrt(const v_reg<_Tp, n>& a)\n{\n    v_reg<_Tp, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = 1.f/std::sqrt(a.s[i]);\n    return c;\n}\n\n/** @brief Magnitude\n\nReturns \f$ sqrt(a^2 + b^2) \f$\nFor floating point types only. */\ntemplate<typename _Tp, int n>\ninline v_reg<_Tp, n> v_magnitude(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)\n{\n    v_reg<_Tp, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = std::sqrt(a.s[i]*a.s[i] + b.s[i]*b.s[i]);\n    return c;\n}\n\n/** @brief Square of the magnitude\n\nReturns \f$ a^2 + b^2 \f$\nFor floating point types only. */\ntemplate<typename _Tp, int n>\ninline v_reg<_Tp, n> v_sqr_magnitude(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)\n{\n    v_reg<_Tp, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = a.s[i]*a.s[i] + b.s[i]*b.s[i];\n    return c;\n}\n\n/** @brief Multiply and add\n\nReturns \f$ a*b + c \f$\nFor floating point types only. 
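\nExample (an illustrative sketch):\n@code{.cpp}\nv_float32x4 a, b, c; // {1, 2, 3, 4}, {2, 2, 2, 2}, {10, 10, 10, 10}\nv_float32x4 d = v_muladd(a, b, c); // d = {12, 14, 16, 18}\n@endcode\n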
*/\ntemplate<typename _Tp, int n>\ninline v_reg<_Tp, n> v_muladd(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,\n                              const v_reg<_Tp, n>& c)\n{\n    v_reg<_Tp, n> d;\n    for( int i = 0; i < n; i++ )\n        d.s[i] = a.s[i]*b.s[i] + c.s[i];\n    return d;\n}\n\n/** @brief Dot product of elements\n\nMultiply values in two registers and sum adjacent result pairs.\nScheme:\n@code\n  {A1 A2 ...} // 16-bit\nx {B1 B2 ...} // 16-bit\n-------------\n{A1B1+A2B2 ...} // 32-bit\n@endcode\nImplemented only for 16-bit signed source type (v_int16x8).\n*/\ntemplate<typename _Tp, int n> inline v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>\n    v_dotprod(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)\n{\n    typedef typename V_TypeTraits<_Tp>::w_type w_type;\n    v_reg<w_type, n/2> c;\n    for( int i = 0; i < (n/2); i++ )\n        c.s[i] = (w_type)a.s[i*2]*b.s[i*2] + (w_type)a.s[i*2+1]*b.s[i*2+1];\n    return c;\n}\n\n/** @brief Multiply and expand\n\nMultiply values from two registers and store results in two registers with wider pack type.\nScheme:\n@code\n  {A B C D} // 32-bit\nx {E F G H} // 32-bit\n---------------\n{AE BF}         // 64-bit\n        {CG DH} // 64-bit\n@endcode\nExample:\n@code{.cpp}\nv_uint32x4 a, b; // {1,2,3,4} and {2,2,2,2}\nv_uint64x2 c, d; // results\nv_mul_expand(a, b, c, d); // c, d = {2,4}, {6, 8}\n@endcode\nImplemented only for 16- and unsigned 32-bit source types (v_int16x8, v_uint16x8, v_uint32x4).\n*/\ntemplate<typename _Tp, int n> inline void v_mul_expand(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,\n                                                       v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& c,\n                                                       v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& d)\n{\n    typedef typename V_TypeTraits<_Tp>::w_type w_type;\n    for( int i = 0; i < (n/2); i++ )\n    {\n        c.s[i] = (w_type)a.s[i]*b.s[i];\n        d.s[i] = (w_type)a.s[i+(n/2)]*b.s[i+(n/2)];\n    }\n}\n\n//! @cond IGNORED\ntemplate<typename _Tp, int n> inline void v_hsum(const v_reg<_Tp, n>& a,\n                                                 v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& c)\n{\n    typedef typename V_TypeTraits<_Tp>::w_type w_type;\n    for( int i = 0; i < (n/2); i++ )\n    {\n        c.s[i] = (w_type)a.s[i*2] + a.s[i*2+1];\n    }\n}\n//! @endcond\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_SHIFT_OP(shift_op) \\\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n> operator shift_op(const v_reg<_Tp, n>& a, int imm) \\\n{ \\\n    v_reg<_Tp, n> c; \\\n    for( int i = 0; i < n; i++ ) \\\n        c.s[i] = (_Tp)(a.s[i] shift_op imm); \\\n    return c; \\\n}\n\n/** @brief Bitwise shift left\n\nFor 16-, 32- and 64-bit integer values. */\nOPENCV_HAL_IMPL_SHIFT_OP(<<)\n\n/** @brief Bitwise shift right\n\nFor 16-, 32- and 64-bit integer values. 
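\nExample (an illustrative sketch with an unsigned type):\n@code{.cpp}\nv_uint16x8 a; // {1024, 2048, ...}\nv_uint16x8 b = a >> 2; // b = {256, 512, ...}\n@endcode\n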
*/\nOPENCV_HAL_IMPL_SHIFT_OP(>>)\n\n/** @brief Sum packed values\n\nScheme:\n@code\n{A1 A2 A3 ...} => sum{A1,A2,A3,...}\n@endcode\nFor 32-bit integer and 32-bit floating point types.*/\ntemplate<typename _Tp, int n> inline typename V_TypeTraits<_Tp>::sum_type v_reduce_sum(const v_reg<_Tp, n>& a)\n{\n    typename V_TypeTraits<_Tp>::sum_type c = a.s[0];\n    for( int i = 1; i < n; i++ )\n        c += a.s[i];\n    return c;\n}\n\n/** @brief Get negative values mask\n\nThe returned value is a bit mask with bits set to 1 at positions corresponding to negative packed values.\nExample:\n@code{.cpp}\nv_int32x4 r; // set to {-1, -1, 1, 1}\nint mask = v_signmask(r); // mask = 3 <== 00000000 00000000 00000000 00000011\n@endcode\nFor all types except 64-bit. */\ntemplate<typename _Tp, int n> inline int v_signmask(const v_reg<_Tp, n>& a)\n{\n    int mask = 0;\n    for( int i = 0; i < n; i++ )\n        mask |= (V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) < 0) << i;\n    return mask;\n}\n\n/** @brief Check if all packed values are less than zero\n\nUnsigned values will be cast to signed: `uchar 254 => char -2`.\nFor all types except 64-bit. */\ntemplate<typename _Tp, int n> inline bool v_check_all(const v_reg<_Tp, n>& a)\n{\n    for( int i = 0; i < n; i++ )\n        if( V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) >= 0 )\n            return false;\n    return true;\n}\n\n/** @brief Check if any of the packed values is less than zero\n\nUnsigned values will be cast to signed: `uchar 254 => char -2`.\nFor all types except 64-bit. */\ntemplate<typename _Tp, int n> inline bool v_check_any(const v_reg<_Tp, n>& a)\n{\n    for( int i = 0; i < n; i++ )\n        if( V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) < 0 )\n            return true;\n    return false;\n}\n\n/** @brief Bitwise select\n\nReturn value will be built by combining values a and b using the following scheme:\nIf the i-th bit in _mask_ is 1\n    select i-th bit from _a_\nelse\n    select i-th bit from _b_ */\ntemplate<typename _Tp, int n> inline v_reg<_Tp, n> v_select(const v_reg<_Tp, n>& mask,\n                                                           const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)\n{\n    typedef V_TypeTraits<_Tp> Traits;\n    typedef typename Traits::int_type int_type;\n    v_reg<_Tp, n> c;\n    for( int i = 0; i < n; i++ )\n    {\n        int_type m = Traits::reinterpret_int(mask.s[i]);\n        c.s[i] =  Traits::reinterpret_from_int((Traits::reinterpret_int(a.s[i]) & m)\n                                             | (Traits::reinterpret_int(b.s[i]) & ~m));\n    }\n    return c;\n}\n\n/** @brief Expand values to the wider pack type\n\nCopy contents of register to two registers with 2x wider pack type.\nScheme:\n@code\n int32x4     int64x2 int64x2\n{A B C D} ==> {A B} , {C D}\n@endcode */\ntemplate<typename _Tp, int n> inline void v_expand(const v_reg<_Tp, n>& a,\n                            v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& b0,\n                            v_reg<typename V_TypeTraits<_Tp>::w_type, n/2>& b1)\n{\n    for( int i = 0; i < (n/2); i++ )\n    {\n        b0.s[i] = a.s[i];\n        b1.s[i] = a.s[i+(n/2)];\n    }\n}\n\n//! 
@cond IGNORED\ntemplate<typename _Tp, int n> inline v_reg<typename V_TypeTraits<_Tp>::int_type, n>\n    v_reinterpret_as_int(const v_reg<_Tp, n>& a)\n{\n    v_reg<typename V_TypeTraits<_Tp>::int_type, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = V_TypeTraits<_Tp>::reinterpret_int(a.s[i]);\n    return c;\n}\n\ntemplate<typename _Tp, int n> inline v_reg<typename V_TypeTraits<_Tp>::uint_type, n>\n    v_reinterpret_as_uint(const v_reg<_Tp, n>& a)\n{\n    v_reg<typename V_TypeTraits<_Tp>::uint_type, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = V_TypeTraits<_Tp>::reinterpret_uint(a.s[i]);\n    return c;\n}\n//! @endcond\n\n/** @brief Interleave two vectors\n\nScheme:\n@code\n  {A1 A2 A3 A4}\n  {B1 B2 B3 B4}\n---------------\n  {A1 B1 A2 B2} and {A3 B3 A4 B4}\n@endcode\nFor all types except 64-bit.\n*/\ntemplate<typename _Tp, int n> inline void v_zip( const v_reg<_Tp, n>& a0, const v_reg<_Tp, n>& a1,\n                                               v_reg<_Tp, n>& b0, v_reg<_Tp, n>& b1 )\n{\n    int i;\n    for( i = 0; i < n/2; i++ )\n    {\n        b0.s[i*2] = a0.s[i];\n        b0.s[i*2+1] = a1.s[i];\n    }\n    for( ; i < n; i++ )\n    {\n        b1.s[i*2-n] = a0.s[i];\n        b1.s[i*2-n+1] = a1.s[i];\n    }\n}\n\n/** @brief Load register contents from memory\n\n@param ptr pointer to memory block with data\n@return register object\n\n@note Returned type will be detected from passed pointer type, for example uchar ==> cv::v_uint8x16, int ==> cv::v_int32x4, etc.\n */\ntemplate<typename _Tp>\ninline v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes> v_load(const _Tp* ptr)\n{\n    return v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes>(ptr);\n}\n\n/** @brief Load register contents from memory (aligned)\n\nsimilar to cv::v_load, but source memory block should be aligned (to 16-byte boundary)\n */\ntemplate<typename _Tp>\ninline v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes> v_load_aligned(const _Tp* ptr)\n{\n    return v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes>(ptr);\n}\n\n/** @brief Load register contents from two memory blocks\n\n@param loptr memory block containing data for first half (0..n/2)\n@param hiptr memory block containing data for second half (n/2..n)\n\n@code{.cpp}\nint lo[2] = { 1, 2 }, hi[2] = { 3, 4 };\nv_int32x4 r = v_load_halves(lo, hi);\n@endcode\n */\ntemplate<typename _Tp>\ninline v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes> v_load_halves(const _Tp* loptr, const _Tp* hiptr)\n{\n    v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes> c;\n    for( int i = 0; i < c.nlanes/2; i++ )\n    {\n        c.s[i] = loptr[i];\n        c.s[i+c.nlanes/2] = hiptr[i];\n    }\n    return c;\n}\n\n/** @brief Load register contents from memory with double expand\n\nSame as cv::v_load, but result pack type will be 2x wider than memory type.\n\n@code{.cpp}\nshort buf[4] = {1, 2, 3, 4}; // type is int16\nv_int32x4 r = v_load_expand(buf); // r = {1, 2, 3, 4} - type is int32\n@endcode\nFor 8-, 16-, 32-bit integer source types. 
*/\ntemplate<typename _Tp>\ninline v_reg<typename V_TypeTraits<_Tp>::w_type, V_SIMD128Traits<_Tp>::nlanes / 2>\nv_load_expand(const _Tp* ptr)\n{\n    typedef typename V_TypeTraits<_Tp>::w_type w_type;\n    v_reg<w_type, V_SIMD128Traits<w_type>::nlanes> c;\n    for( int i = 0; i < c.nlanes; i++ )\n    {\n        c.s[i] = ptr[i];\n    }\n    return c;\n}\n\n/** @brief Load register contents from memory with quad expand\n\nSame as cv::v_load_expand, but result type is 4 times wider than source.\n@code{.cpp}\nchar buf[4] = {1, 2, 3, 4}; // type is int8\nv_int32x4 r = v_load_q(buf); // r = {1, 2, 3, 4} - type is int32\n@endcode\nFor 8-bit integer source types. */\ntemplate<typename _Tp>\ninline v_reg<typename V_TypeTraits<_Tp>::q_type, V_SIMD128Traits<_Tp>::nlanes / 4>\nv_load_expand_q(const _Tp* ptr)\n{\n    typedef typename V_TypeTraits<_Tp>::q_type q_type;\n    v_reg<q_type, V_SIMD128Traits<q_type>::nlanes> c;\n    for( int i = 0; i < c.nlanes; i++ )\n    {\n        c.s[i] = ptr[i];\n    }\n    return c;\n}\n\n/** @brief Load and deinterleave (3 channels)\n\nLoad data from memory, deinterleave, and store into 3 registers.\nScheme:\n@code\n{A1 B1 C1 A2 B2 C2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}\n@endcode\nFor all types except 64-bit. */\ntemplate<typename _Tp, int n> inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,\n                                                            v_reg<_Tp, n>& b, v_reg<_Tp, n>& c)\n{\n    int i, i3;\n    for( i = i3 = 0; i < n; i++, i3 += 3 )\n    {\n        a.s[i] = ptr[i3];\n        b.s[i] = ptr[i3+1];\n        c.s[i] = ptr[i3+2];\n    }\n}\n\n/** @brief Load and deinterleave (4 channels)\n\nLoad data from memory, deinterleave, and store into 4 registers.\nScheme:\n@code\n{A1 B1 C1 D1 A2 B2 C2 D2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}, {D1 D2 ...}\n@endcode\nFor all types except 64-bit. */\ntemplate<typename _Tp, int n>\ninline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,\n                                v_reg<_Tp, n>& b, v_reg<_Tp, n>& c,\n                                v_reg<_Tp, n>& d)\n{\n    int i, i4;\n    for( i = i4 = 0; i < n; i++, i4 += 4 )\n    {\n        a.s[i] = ptr[i4];\n        b.s[i] = ptr[i4+1];\n        c.s[i] = ptr[i4+2];\n        d.s[i] = ptr[i4+3];\n    }\n}\n\n/** @brief Interleave and store (3 channels)\n\nInterleave and store data from 3 registers to memory.\nScheme:\n@code\n{A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...} ==> {A1 B1 C1 A2 B2 C2 ...}\n@endcode\nFor all types except 64-bit. */\ntemplate<typename _Tp, int n>\ninline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a,\n                                const v_reg<_Tp, n>& b, const v_reg<_Tp, n>& c)\n{\n    int i, i3;\n    for( i = i3 = 0; i < n; i++, i3 += 3 )\n    {\n        ptr[i3] = a.s[i];\n        ptr[i3+1] = b.s[i];\n        ptr[i3+2] = c.s[i];\n    }\n}\n\n/** @brief Interleave and store (4 channels)\n\nInterleave and store data from 4 registers to memory.\nScheme:\n@code\n{A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}, {D1 D2 ...} ==> {A1 B1 C1 D1 A2 B2 C2 D2 ...}\n@endcode\nFor all types except 64-bit. 
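\nExample (an illustrative sketch for packing color planes; the buffer and plane names are hypothetical):\n@code{.cpp}\nv_uint8x16 r, g, b, a; // color planes, initialized elsewhere\nuchar rgba[16*4];      // packed pixel buffer\nv_store_interleave(rgba, r, g, b, a); // rgba = {R0 G0 B0 A0 R1 G1 B1 A1 ...}\n@endcode\n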
*/\ntemplate<typename _Tp, int n> inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a,\n                                                            const v_reg<_Tp, n>& b, const v_reg<_Tp, n>& c,\n                                                            const v_reg<_Tp, n>& d)\n{\n    int i, i4;\n    for( i = i4 = 0; i < n; i++, i4 += 4 )\n    {\n        ptr[i4] = a.s[i];\n        ptr[i4+1] = b.s[i];\n        ptr[i4+2] = c.s[i];\n        ptr[i4+3] = d.s[i];\n    }\n}\n\n/** @brief Store data to memory\n\nStore register contents to memory.\nScheme:\n@code\n  REG {A B C D} ==> MEM {A B C D}\n@endcode\nPointer can be unaligned. */\ntemplate<typename _Tp, int n>\ninline void v_store(_Tp* ptr, const v_reg<_Tp, n>& a)\n{\n    for( int i = 0; i < n; i++ )\n        ptr[i] = a.s[i];\n}\n\n/** @brief Store data to memory (lower half)\n\nStore lower half of register contents to memory.\nScheme:\n@code\n  REG {A B C D} ==> MEM {A B}\n@endcode */\ntemplate<typename _Tp, int n>\ninline void v_store_low(_Tp* ptr, const v_reg<_Tp, n>& a)\n{\n    for( int i = 0; i < (n/2); i++ )\n        ptr[i] = a.s[i];\n}\n\n/** @brief Store data to memory (higher half)\n\nStore higher half of register contents to memory.\nScheme:\n@code\n  REG {A B C D} ==> MEM {C D}\n@endcode */\ntemplate<typename _Tp, int n>\ninline void v_store_high(_Tp* ptr, const v_reg<_Tp, n>& a)\n{\n    for( int i = 0; i < (n/2); i++ )\n        ptr[i] = a.s[i+(n/2)];\n}\n\n/** @brief Store data to memory (aligned)\n\nStore register contents to memory.\nScheme:\n@code\n  REG {A B C D} ==> MEM {A B C D}\n@endcode\nPointer __should__ be aligned to a 16-byte boundary. */\ntemplate<typename _Tp, int n>\ninline void v_store_aligned(_Tp* ptr, const v_reg<_Tp, n>& a)\n{\n    for( int i = 0; i < n; i++ )\n        ptr[i] = a.s[i];\n}\n\n/** @brief Combine vector from first elements of two vectors\n\nScheme:\n@code\n  {A1 A2 A3 A4}\n  {B1 B2 B3 B4}\n---------------\n  {A1 A2 B1 B2}\n@endcode\nFor all types except 64-bit. */\ntemplate<typename _Tp, int n>\ninline v_reg<_Tp, n> v_combine_low(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)\n{\n    v_reg<_Tp, n> c;\n    for( int i = 0; i < (n/2); i++ )\n    {\n        c.s[i] = a.s[i];\n        c.s[i+(n/2)] = b.s[i];\n    }\n    return c;\n}\n\n/** @brief Combine vector from last elements of two vectors\n\nScheme:\n@code\n  {A1 A2 A3 A4}\n  {B1 B2 B3 B4}\n---------------\n  {A3 A4 B3 B4}\n@endcode\nFor all types except 64-bit. 
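\nExample (an illustrative sketch):\n@code{.cpp}\nv_int32x4 a, b; // {1, 2, 3, 4} and {5, 6, 7, 8}\nv_int32x4 c = v_combine_high(a, b); // c = {3, 4, 7, 8}\n@endcode\n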
*/\ntemplate<typename _Tp, int n>\ninline v_reg<_Tp, n> v_combine_high(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)\n{\n    v_reg<_Tp, n> c;\n    for( int i = 0; i < (n/2); i++ )\n    {\n        c.s[i] = a.s[i+(n/2)];\n        c.s[i+(n/2)] = b.s[i+(n/2)];\n    }\n    return c;\n}\n\n/** @brief Combine two vectors from lower and higher parts of two other vectors\n\n@code{.cpp}\nlow = cv::v_combine_low(a, b);\nhigh = cv::v_combine_high(a, b);\n@endcode */\ntemplate<typename _Tp, int n>\ninline void v_recombine(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,\n                        v_reg<_Tp, n>& low, v_reg<_Tp, n>& high)\n{\n    for( int i = 0; i < (n/2); i++ )\n    {\n        low.s[i] = a.s[i];\n        low.s[i+(n/2)] = b.s[i];\n        high.s[i] = a.s[i+(n/2)];\n        high.s[i+(n/2)] = b.s[i+(n/2)];\n    }\n}\n\n/** @brief Vector extract\n\nScheme:\n@code\n  {A1 A2 A3 A4}\n  {B1 B2 B3 B4}\n========================\nshift = 1  {A2 A3 A4 B1}\nshift = 2  {A3 A4 B1 B2}\nshift = 3  {A4 B1 B2 B3}\n@endcode\nRestriction: 0 <= shift < nlanes\n\nUsage:\n@code\nv_int32x4 a, b, c;\nc = v_extract<2>(a, b);\n@endcode\nFor integer types only. */\ntemplate<int s, typename _Tp, int n>\ninline v_reg<_Tp, n> v_extract(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)\n{\n    v_reg<_Tp, n> r;\n    const int shift = n - s;\n    int i = 0;\n    for (; i < shift; ++i)\n        r.s[i] = a.s[i+s];\n    for (; i < n; ++i)\n        r.s[i] = b.s[i-shift];\n    return r;\n}\n\n/** @brief Round\n\nRounds each value. Input type is float vector ==> output type is int vector.*/\ntemplate<int n> inline v_reg<int, n> v_round(const v_reg<float, n>& a)\n{\n    v_reg<int, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = cvRound(a.s[i]);\n    return c;\n}\n\n/** @brief Floor\n\nFloor each value. Input type is float vector ==> output type is int vector.*/\ntemplate<int n> inline v_reg<int, n> v_floor(const v_reg<float, n>& a)\n{\n    v_reg<int, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = cvFloor(a.s[i]);\n    return c;\n}\n\n/** @brief Ceil\n\nCeil each value. Input type is float vector ==> output type is int vector.*/\ntemplate<int n> inline v_reg<int, n> v_ceil(const v_reg<float, n>& a)\n{\n    v_reg<int, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = cvCeil(a.s[i]);\n    return c;\n}\n\n/** @brief Trunc\n\nTruncate each value. 
Input type is float vector ==> output type is int vector. Rounding is toward zero.*/\ntemplate<int n> inline v_reg<int, n> v_trunc(const v_reg<float, n>& a)\n{\n    v_reg<int, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = (int)(a.s[i]);\n    return c;\n}\n\n/** @overload */\ntemplate<int n> inline v_reg<int, n*2> v_round(const v_reg<double, n>& a)\n{\n    v_reg<int, n*2> c;\n    for( int i = 0; i < n; i++ )\n    {\n        c.s[i] = cvRound(a.s[i]);\n        c.s[i+n] = 0;\n    }\n    return c;\n}\n\n/** @overload */\ntemplate<int n> inline v_reg<int, n*2> v_floor(const v_reg<double, n>& a)\n{\n    v_reg<int, n*2> c;\n    for( int i = 0; i < n; i++ )\n    {\n        c.s[i] = cvFloor(a.s[i]);\n        c.s[i+n] = 0;\n    }\n    return c;\n}\n\n/** @overload */\ntemplate<int n> inline v_reg<int, n*2> v_ceil(const v_reg<double, n>& a)\n{\n    v_reg<int, n*2> c;\n    for( int i = 0; i < n; i++ )\n    {\n        c.s[i] = cvCeil(a.s[i]);\n        c.s[i+n] = 0;\n    }\n    return c;\n}\n\n/** @overload */\ntemplate<int n> inline v_reg<int, n*2> v_trunc(const v_reg<double, n>& a)\n{\n    v_reg<int, n*2> c;\n    for( int i = 0; i < n; i++ )\n    {\n        c.s[i] = (int)(a.s[i]);\n        c.s[i+n] = 0;\n    }\n    return c;\n}\n\n/** @brief Convert to float\n\nSupported input type is cv::v_int32x4. */\ntemplate<int n> inline v_reg<float, n> v_cvt_f32(const v_reg<int, n>& a)\n{\n    v_reg<float, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = (float)a.s[i];\n    return c;\n}\n\n/** @brief Convert to double\n\nSupported input type is cv::v_int32x4. */\ntemplate<int n> inline v_reg<double, n> v_cvt_f64(const v_reg<int, n*2>& a)\n{\n    v_reg<double, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = (double)a.s[i];\n    return c;\n}\n\n/** @brief Convert to double\n\nSupported input type is cv::v_float32x4. */\ntemplate<int n> inline v_reg<double, n> v_cvt_f64(const v_reg<float, n*2>& a)\n{\n    v_reg<double, n> c;\n    for( int i = 0; i < n; i++ )\n        c.s[i] = (double)a.s[i];\n    return c;\n}\n\n/** @brief Transpose 4x4 matrix\n\nScheme:\n@code\na0  {A1 A2 A3 A4}\na1  {B1 B2 B3 B4}\na2  {C1 C2 C3 C4}\na3  {D1 D2 D3 D4}\n===============\nb0  {A1 B1 C1 D1}\nb1  {A2 B2 C2 D2}\nb2  {A3 B3 C3 D3}\nb3  {A4 B4 C4 D4}\n@endcode\n*/\ntemplate<typename _Tp>\ninline void v_transpose4x4( const v_reg<_Tp, 4>& a0, const v_reg<_Tp, 4>& a1,\n                            const v_reg<_Tp, 4>& a2, const v_reg<_Tp, 4>& a3,\n                            v_reg<_Tp, 4>& b0, v_reg<_Tp, 4>& b1,\n                            v_reg<_Tp, 4>& b2, v_reg<_Tp, 4>& b3 )\n{\n    b0 = v_reg<_Tp, 4>(a0.s[0], a1.s[0], a2.s[0], a3.s[0]);\n    b1 = v_reg<_Tp, 4>(a0.s[1], a1.s[1], a2.s[1], a3.s[1]);\n    b2 = v_reg<_Tp, 4>(a0.s[2], a1.s[2], a2.s[2], a3.s[2]);\n    b3 = v_reg<_Tp, 4>(a0.s[3], a1.s[3], a2.s[3], a3.s[3]);\n}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_INIT_ZERO(_Tpvec, _Tp, suffix) \\\ninline _Tpvec v_setzero_##suffix() { return _Tpvec::zero(); }\n\n//! @name Init with zero\n//! @{\n//! 
@brief Create new vector with zero elements\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_uint8x16, uchar, u8)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_int8x16, schar, s8)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_uint16x8, ushort, u16)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_int16x8, short, s16)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_uint32x4, unsigned, u32)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_int32x4, int, s32)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_float32x4, float, f32)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_float64x2, double, f64)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_uint64x2, uint64, u64)\nOPENCV_HAL_IMPL_C_INIT_ZERO(v_int64x2, int64, s64)\n//! @}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_INIT_VAL(_Tpvec, _Tp, suffix) \\\ninline _Tpvec v_setall_##suffix(_Tp val) { return _Tpvec::all(val); }\n\n//! @name Init with value\n//! @{\n//! @brief Create new vector with elements set to a specific value\nOPENCV_HAL_IMPL_C_INIT_VAL(v_uint8x16, uchar, u8)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_int8x16, schar, s8)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_uint16x8, ushort, u16)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_int16x8, short, s16)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_uint32x4, unsigned, u32)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_int32x4, int, s32)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_float32x4, float, f32)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_float64x2, double, f64)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_uint64x2, uint64, u64)\nOPENCV_HAL_IMPL_C_INIT_VAL(v_int64x2, int64, s64)\n//! @}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_REINTERPRET(_Tpvec, _Tp, suffix) \\\ntemplate<typename _Tp0, int n0> inline _Tpvec \\\n    v_reinterpret_as_##suffix(const v_reg<_Tp0, n0>& a) \\\n{ return a.template reinterpret_as<_Tp, _Tpvec::nlanes>(); }\n\n//! @name Reinterpret\n//! @{\n//! @brief Convert vector to different type without modifying underlying data.\nOPENCV_HAL_IMPL_C_REINTERPRET(v_uint8x16, uchar, u8)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_int8x16, schar, s8)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_uint16x8, ushort, u16)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_int16x8, short, s16)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_uint32x4, unsigned, u32)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_int32x4, int, s32)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_float32x4, float, f32)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_float64x2, double, f64)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_uint64x2, uint64, u64)\nOPENCV_HAL_IMPL_C_REINTERPRET(v_int64x2, int64, s64)\n//! @}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_SHIFTL(_Tpvec, _Tp) \\\ntemplate<int n> inline _Tpvec v_shl(const _Tpvec& a) \\\n{ return a << n; }\n\n//! @name Left shift\n//! @{\n//! @brief Shift left\nOPENCV_HAL_IMPL_C_SHIFTL(v_uint16x8, ushort)\nOPENCV_HAL_IMPL_C_SHIFTL(v_int16x8, short)\nOPENCV_HAL_IMPL_C_SHIFTL(v_uint32x4, unsigned)\nOPENCV_HAL_IMPL_C_SHIFTL(v_int32x4, int)\nOPENCV_HAL_IMPL_C_SHIFTL(v_uint64x2, uint64)\nOPENCV_HAL_IMPL_C_SHIFTL(v_int64x2, int64)\n//! @}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_SHIFTR(_Tpvec, _Tp) \\\ntemplate<int n> inline _Tpvec v_shr(const _Tpvec& a) \\\n{ return a >> n; }\n\n//! @name Right shift\n//! @{\n//! @brief Shift right\nOPENCV_HAL_IMPL_C_SHIFTR(v_uint16x8, ushort)\nOPENCV_HAL_IMPL_C_SHIFTR(v_int16x8, short)\nOPENCV_HAL_IMPL_C_SHIFTR(v_uint32x4, unsigned)\nOPENCV_HAL_IMPL_C_SHIFTR(v_int32x4, int)\nOPENCV_HAL_IMPL_C_SHIFTR(v_uint64x2, uint64)\nOPENCV_HAL_IMPL_C_SHIFTR(v_int64x2, int64)\n//! @}\n\n//! @brief Helper macro\n//! 
@ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_RSHIFTR(_Tpvec, _Tp) \\\ntemplate<int n> inline _Tpvec v_rshr(const _Tpvec& a) \\\n{ \\\n    _Tpvec c; \\\n    for( int i = 0; i < _Tpvec::nlanes; i++ ) \\\n        c.s[i] = (_Tp)((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \\\n    return c; \\\n}\n\n//! @name Rounding shift\n//! @{\n//! @brief Rounding shift right\nOPENCV_HAL_IMPL_C_RSHIFTR(v_uint16x8, ushort)\nOPENCV_HAL_IMPL_C_RSHIFTR(v_int16x8, short)\nOPENCV_HAL_IMPL_C_RSHIFTR(v_uint32x4, unsigned)\nOPENCV_HAL_IMPL_C_RSHIFTR(v_int32x4, int)\nOPENCV_HAL_IMPL_C_RSHIFTR(v_uint64x2, uint64)\nOPENCV_HAL_IMPL_C_RSHIFTR(v_int64x2, int64)\n//! @}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_PACK(_Tpvec, _Tpnvec, _Tpn, pack_suffix) \\\ninline _Tpnvec v_##pack_suffix(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    _Tpnvec c; \\\n    for( int i = 0; i < _Tpvec::nlanes; i++ ) \\\n    { \\\n        c.s[i] = saturate_cast<_Tpn>(a.s[i]); \\\n        c.s[i+_Tpvec::nlanes] = saturate_cast<_Tpn>(b.s[i]); \\\n    } \\\n    return c; \\\n}\n\n//! @name Pack\n//! @{\n//! @brief Pack values from two vectors to one\n//!\n//! The return vector type has twice as many elements as the input vector types. Variant with _u_ suffix also\n//! converts to corresponding unsigned type.\n//!\n//! - pack: for 16-, 32- and 64-bit integer input types\n//! - pack_u: for 16- and 32-bit signed integer input types\nOPENCV_HAL_IMPL_C_PACK(v_uint16x8, v_uint8x16, uchar, pack)\nOPENCV_HAL_IMPL_C_PACK(v_int16x8, v_int8x16, schar, pack)\nOPENCV_HAL_IMPL_C_PACK(v_uint32x4, v_uint16x8, ushort, pack)\nOPENCV_HAL_IMPL_C_PACK(v_int32x4, v_int16x8, short, pack)\nOPENCV_HAL_IMPL_C_PACK(v_uint64x2, v_uint32x4, unsigned, pack)\nOPENCV_HAL_IMPL_C_PACK(v_int64x2, v_int32x4, int, pack)\nOPENCV_HAL_IMPL_C_PACK(v_int16x8, v_uint8x16, uchar, pack_u)\nOPENCV_HAL_IMPL_C_PACK(v_int32x4, v_uint16x8, ushort, pack_u)\n//! @}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_RSHR_PACK(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix) \\\ntemplate<int n> inline _Tpnvec v_rshr_##pack_suffix(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    _Tpnvec c; \\\n    for( int i = 0; i < _Tpvec::nlanes; i++ ) \\\n    { \\\n        c.s[i] = saturate_cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \\\n        c.s[i+_Tpvec::nlanes] = saturate_cast<_Tpn>((b.s[i] + ((_Tp)1 << (n - 1))) >> n); \\\n    } \\\n    return c; \\\n}\n\n//! @name Pack with rounding shift\n//! @{\n//! @brief Pack values from two vectors to one with rounding shift\n//!\n//! Values from the input vectors will be shifted right by _n_ bits with rounding, converted to narrower\n//! type and returned in the result vector. Variant with _u_ suffix converts to unsigned type.\n//!\n//! - pack: for 16-, 32- and 64-bit integer input types\n//! - pack_u: for 16- and 32-bit signed integer input types\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_uint16x8, ushort, v_uint8x16, uchar, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_int8x16, schar, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_uint32x4, unsigned, v_uint16x8, ushort, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_int16x8, short, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_uint64x2, uint64, v_uint32x4, unsigned, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_int64x2, int64, v_int32x4, int, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_uint8x16, uchar, pack_u)\nOPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_uint16x8, ushort, pack_u)\n//! @}\n\n//! @brief Helper macro\n//! 
@ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix) \\\ninline void v_##pack_suffix##_store(_Tpn* ptr, const _Tpvec& a) \\\n{ \\\n    for( int i = 0; i < _Tpvec::nlanes; i++ ) \\\n        ptr[i] = saturate_cast<_Tpn>(a.s[i]); \\\n}\n\n//! @name Pack and store\n//! @{\n//! @brief Store values from the input vector into memory with pack\n//!\n//! Values will be stored into memory with saturating conversion to narrower type.\n//! Variant with _u_ suffix converts to corresponding unsigned type.\n//!\n//! - pack: for 16-, 32- and 64-bit integer input types\n//! - pack_u: for 16- and 32-bit signed integer input types\nOPENCV_HAL_IMPL_C_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack)\nOPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack)\nOPENCV_HAL_IMPL_C_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack)\nOPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_int16x8, short, pack)\nOPENCV_HAL_IMPL_C_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack)\nOPENCV_HAL_IMPL_C_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack)\nOPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u)\nOPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u)\n//! @}\n\n//! @brief Helper macro\n//! @ingroup core_hal_intrin_impl\n#define OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix) \\\ntemplate<int n> inline void v_rshr_##pack_suffix##_store(_Tpn* ptr, const _Tpvec& a) \\\n{ \\\n    for( int i = 0; i < _Tpvec::nlanes; i++ ) \\\n        ptr[i] = saturate_cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \\\n}\n\n//! @name Pack and store with rounding shift\n//! @{\n//! @brief Store values from the input vector into memory with pack\n//!\n//! Values will be shifted _n_ bits right with rounding, converted to narrower type and stored into\n//! memory. Variant with _u_ suffix converts to unsigned type.\n//!\n//! - pack: for 16-, 32- and 64-bit integer input types\n//! - pack_u: for 16- and 32-bit signed integer input types\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_int16x8, short, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack)\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u)\nOPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u)\n//! 
@}\n\n/** @brief Matrix multiplication\n\nScheme:\n@code\n{A0 A1 A2 A3}   |V0|\n{B0 B1 B2 B3}   |V1|\n{C0 C1 C2 C3}   |V2|\n{D0 D1 D2 D3} x |V3|\n====================\n{R0 R1 R2 R3}, where:\nR0 = A0V0 + A1V1 + A2V2 + A3V3,\nR1 = B0V0 + B1V1 + B2V2 + B3V3\n...\n@endcode\n*/\ninline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,\n                            const v_float32x4& m1, const v_float32x4& m2,\n                            const v_float32x4& m3)\n{\n    return v_float32x4(v.s[0]*m0.s[0] + v.s[1]*m1.s[0] + v.s[2]*m2.s[0] + v.s[3]*m3.s[0],\n                       v.s[0]*m0.s[1] + v.s[1]*m1.s[1] + v.s[2]*m2.s[1] + v.s[3]*m3.s[1],\n                       v.s[0]*m0.s[2] + v.s[1]*m1.s[2] + v.s[2]*m2.s[2] + v.s[3]*m3.s[2],\n                       v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + v.s[3]*m3.s[3]);\n}\n\n//! @}\n\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/hal/intrin_neon.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HAL_INTRIN_NEON_HPP__\n#define __OPENCV_HAL_INTRIN_NEON_HPP__\n\n#include <algorithm>\n\nnamespace cv\n{\n\n//! 
@cond IGNORED\n\n#define CV_SIMD128 1\n\nstruct v_uint8x16\n{\n    typedef uchar lane_type;\n    enum { nlanes = 16 };\n\n    v_uint8x16() {}\n    explicit v_uint8x16(uint8x16_t v) : val(v) {}\n    v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,\n               uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)\n    {\n        uchar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15};\n        val = vld1q_u8(v);\n    }\n    uchar get0() const\n    {\n        return vgetq_lane_u8(val, 0);\n    }\n\n    uint8x16_t val;\n};\n\nstruct v_int8x16\n{\n    typedef schar lane_type;\n    enum { nlanes = 16 };\n\n    v_int8x16() {}\n    explicit v_int8x16(int8x16_t v) : val(v) {}\n    v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,\n               schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)\n    {\n        schar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15};\n        val = vld1q_s8(v);\n    }\n    schar get0() const\n    {\n        return vgetq_lane_s8(val, 0);\n    }\n\n    int8x16_t val;\n};\n\nstruct v_uint16x8\n{\n    typedef ushort lane_type;\n    enum { nlanes = 8 };\n\n    v_uint16x8() {}\n    explicit v_uint16x8(uint16x8_t v) : val(v) {}\n    v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)\n    {\n        ushort v[] = {v0, v1, v2, v3, v4, v5, v6, v7};\n        val = vld1q_u16(v);\n    }\n    ushort get0() const\n    {\n        return vgetq_lane_u16(val, 0);\n    }\n\n    uint16x8_t val;\n};\n\nstruct v_int16x8\n{\n    typedef short lane_type;\n    enum { nlanes = 8 };\n\n    v_int16x8() {}\n    explicit v_int16x8(int16x8_t v) : val(v) {}\n    v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)\n    {\n        short v[] = {v0, v1, v2, v3, v4, v5, v6, v7};\n        val = vld1q_s16(v);\n    }\n    short get0() const\n    {\n        return vgetq_lane_s16(val, 0);\n    }\n\n    int16x8_t val;\n};\n\nstruct v_uint32x4\n{\n    typedef unsigned lane_type;\n    enum { nlanes = 4 };\n\n    v_uint32x4() {}\n    explicit v_uint32x4(uint32x4_t v) : val(v) {}\n    v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3)\n    {\n        unsigned v[] = {v0, v1, v2, v3};\n        val = vld1q_u32(v);\n    }\n    unsigned get0() const\n    {\n        return vgetq_lane_u32(val, 0);\n    }\n\n    uint32x4_t val;\n};\n\nstruct v_int32x4\n{\n    typedef int lane_type;\n    enum { nlanes = 4 };\n\n    v_int32x4() {}\n    explicit v_int32x4(int32x4_t v) : val(v) {}\n    v_int32x4(int v0, int v1, int v2, int v3)\n    {\n        int v[] = {v0, v1, v2, v3};\n        val = vld1q_s32(v);\n    }\n    int get0() const\n    {\n        return vgetq_lane_s32(val, 0);\n    }\n    int32x4_t val;\n};\n\nstruct v_float32x4\n{\n    typedef float lane_type;\n    enum { nlanes = 4 };\n\n    v_float32x4() {}\n    explicit v_float32x4(float32x4_t v) : val(v) {}\n    v_float32x4(float v0, float v1, float v2, float v3)\n    {\n        float v[] = {v0, v1, v2, v3};\n        val = vld1q_f32(v);\n    }\n    float get0() const\n    {\n        return vgetq_lane_f32(val, 0);\n    }\n    float32x4_t val;\n};\n\nstruct v_uint64x2\n{\n    typedef uint64 lane_type;\n    enum { nlanes = 2 };\n\n    v_uint64x2() {}\n    explicit v_uint64x2(uint64x2_t v) : val(v) {}\n    v_uint64x2(uint64 v0, uint64 v1)\n    {\n        uint64 v[] = {v0, v1};\n        val = vld1q_u64(v);\n    }\n    uint64 get0() const\n    {\n        return vgetq_lane_u64(val, 0);\n    }\n    uint64x2_t val;\n};\n\nstruct v_int64x2\n{\n    typedef int64 lane_type;\n    enum { nlanes = 2 };\n\n    v_int64x2() {}\n    explicit v_int64x2(int64x2_t v) : val(v) {}\n    v_int64x2(int64 v0, int64 v1)\n    {\n        int64 v[] = {v0, v1};\n        val = vld1q_s64(v);\n    }\n    int64 get0() const\n    {\n        return vgetq_lane_s64(val, 0);\n    }\n    int64x2_t val;\n};\n\n#define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \\\ninline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \\\ninline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(vdupq_n_##suffix(v)); } \\\ninline _Tpv##_t vreinterpretq_##suffix##_##suffix(_Tpv##_t v) { return v; } \\\ninline v_uint8x16 v_reinterpret_as_u8(const v_##_Tpv& v) { return v_uint8x16(vreinterpretq_u8_##suffix(v.val)); } \\\ninline v_int8x16 v_reinterpret_as_s8(const v_##_Tpv& v) { return v_int8x16(vreinterpretq_s8_##suffix(v.val)); } \\\ninline v_uint16x8 v_reinterpret_as_u16(const v_##_Tpv& v) { return v_uint16x8(vreinterpretq_u16_##suffix(v.val)); } \\\ninline v_int16x8 v_reinterpret_as_s16(const v_##_Tpv& v) { return v_int16x8(vreinterpretq_s16_##suffix(v.val)); } \\\ninline v_uint32x4 v_reinterpret_as_u32(const v_##_Tpv& v) { return v_uint32x4(vreinterpretq_u32_##suffix(v.val)); } \\\ninline v_int32x4 v_reinterpret_as_s32(const v_##_Tpv& v) { return v_int32x4(vreinterpretq_s32_##suffix(v.val)); } \\\ninline v_uint64x2 v_reinterpret_as_u64(const v_##_Tpv& v) { return v_uint64x2(vreinterpretq_u64_##suffix(v.val)); } \\\ninline v_int64x2 v_reinterpret_as_s64(const v_##_Tpv& v) { return v_int64x2(vreinterpretq_s64_##suffix(v.val)); } \\\ninline v_float32x4 v_reinterpret_as_f32(const v_##_Tpv& v) { return v_float32x4(vreinterpretq_f32_##suffix(v.val)); }\n\nOPENCV_HAL_IMPL_NEON_INIT(uint8x16, uchar, u8)\nOPENCV_HAL_IMPL_NEON_INIT(int8x16, schar, s8)\nOPENCV_HAL_IMPL_NEON_INIT(uint16x8, ushort, u16)\nOPENCV_HAL_IMPL_NEON_INIT(int16x8, short, s16)\nOPENCV_HAL_IMPL_NEON_INIT(uint32x4, unsigned, u32)\nOPENCV_HAL_IMPL_NEON_INIT(int32x4, int, s32)\nOPENCV_HAL_IMPL_NEON_INIT(uint64x2, uint64, u64)\nOPENCV_HAL_IMPL_NEON_INIT(int64x2, int64, s64)\nOPENCV_HAL_IMPL_NEON_INIT(float32x4, float, f32)\n\n#define OPENCV_HAL_IMPL_NEON_PACK(_Tpvec, _Tp, hreg, suffix, _Tpwvec, wsuffix, pack, op) \\\ninline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \\\n{ \\\n    hreg a1 = vqmov##op##_##wsuffix(a.val), b1 = vqmov##op##_##wsuffix(b.val); \\\n    return _Tpvec(vcombine_##suffix(a1, b1)); \\\n} \\\ninline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \\\n{ \\\n    hreg a1 = vqmov##op##_##wsuffix(a.val); \\\n    vst1_##suffix(ptr, a1); \\\n} \\\ntemplate<int n> inline \\\n_Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \\\n{ \\\n    hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \\\n    hreg b1 = vqrshr##op##_n_##wsuffix(b.val, n); \\\n    return _Tpvec(vcombine_##suffix(a1, b1)); \\\n} \\\ntemplate<int n> inline \\\nvoid v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \\\n{ \\\n    hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \\\n    vst1_##suffix(ptr, a1); \\\n}\n\nOPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_uint16x8, u16, pack, n)\nOPENCV_HAL_IMPL_NEON_PACK(v_int8x16, schar, int8x8_t, s8, v_int16x8, s16, pack, n)\nOPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_uint32x4, u32, pack, n)\nOPENCV_HAL_IMPL_NEON_PACK(v_int16x8, short, int16x4_t, s16, v_int32x4, 
s32, pack, n)\nOPENCV_HAL_IMPL_NEON_PACK(v_uint32x4, unsigned, uint32x2_t, u32, v_uint64x2, u64, pack, n)\nOPENCV_HAL_IMPL_NEON_PACK(v_int32x4, int, int32x2_t, s32, v_int64x2, s64, pack, n)\n\nOPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, s16, pack_u, un)\nOPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4, s32, pack_u, un)\n\ninline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,\n                            const v_float32x4& m1, const v_float32x4& m2,\n                            const v_float32x4& m3)\n{\n    float32x2_t vl = vget_low_f32(v.val), vh = vget_high_f32(v.val);\n    float32x4_t res = vmulq_lane_f32(m0.val, vl, 0);\n    res = vmlaq_lane_f32(res, m1.val, vl, 1);\n    res = vmlaq_lane_f32(res, m2.val, vh, 0);\n    res = vmlaq_lane_f32(res, m3.val, vh, 1);\n    return v_float32x4(res);\n}\n\n#define OPENCV_HAL_IMPL_NEON_BIN_OP(bin_op, _Tpvec, intrin) \\\ninline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    return _Tpvec(intrin(a.val, b.val)); \\\n} \\\ninline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \\\n{ \\\n    a.val = intrin(a.val, b.val); \\\n    return a; \\\n}\n\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint8x16, vqaddq_u8)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint8x16, vqsubq_u8)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int8x16, vqaddq_s8)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int8x16, vqsubq_s8)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint16x8, vqaddq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint16x8, vqsubq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_OP(*, v_uint16x8, vmulq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int16x8, vqaddq_s16)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int16x8, vqsubq_s16)\nOPENCV_HAL_IMPL_NEON_BIN_OP(*, v_int16x8, vmulq_s16)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int32x4, vaddq_s32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int32x4, vsubq_s32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(*, v_int32x4, vmulq_s32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint32x4, vaddq_u32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint32x4, vsubq_u32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(*, v_uint32x4, vmulq_u32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float32x4, vaddq_f32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float32x4, vsubq_f32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float32x4, vmulq_f32)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int64x2, vaddq_s64)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int64x2, vsubq_s64)\nOPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint64x2, vaddq_u64)\nOPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint64x2, vsubq_u64)\n\ninline v_float32x4 operator / (const v_float32x4& a, const v_float32x4& b)\n{\n    float32x4_t reciprocal = vrecpeq_f32(b.val);\n    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);\n    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);\n    return v_float32x4(vmulq_f32(a.val, reciprocal));\n}\ninline v_float32x4& operator /= (v_float32x4& a, const v_float32x4& b)\n{\n    float32x4_t reciprocal = vrecpeq_f32(b.val);\n    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);\n    reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal);\n    a.val = vmulq_f32(a.val, reciprocal);\n    return a;\n}\n\ninline void v_mul_expand(const v_int16x8& a, const v_int16x8& b,\n                         v_int32x4& c, v_int32x4& d)\n{\n    c.val = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val));\n    d.val = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val));\n}\n\ninline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,\n                         v_uint32x4& c, v_uint32x4& d)\n{\n    c.val = 
vmull_u16(vget_low_u16(a.val), vget_low_u16(b.val));\n    d.val = vmull_u16(vget_high_u16(a.val), vget_high_u16(b.val));\n}\n\ninline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b,\n                         v_uint64x2& c, v_uint64x2& d)\n{\n    c.val = vmull_u32(vget_low_u32(a.val), vget_low_u32(b.val));\n    d.val = vmull_u32(vget_high_u32(a.val), vget_high_u32(b.val));\n}\n\ninline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)\n{\n    int32x4_t c = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val));\n    int32x4_t d = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val));\n    int32x4x2_t cd = vuzpq_s32(c, d);\n    return v_int32x4(vaddq_s32(cd.val[0], cd.val[1]));\n}\n\n#define OPENCV_HAL_IMPL_NEON_LOGIC_OP(_Tpvec, suffix) \\\n    OPENCV_HAL_IMPL_NEON_BIN_OP(&, _Tpvec, vandq_##suffix) \\\n    OPENCV_HAL_IMPL_NEON_BIN_OP(|, _Tpvec, vorrq_##suffix) \\\n    OPENCV_HAL_IMPL_NEON_BIN_OP(^, _Tpvec, veorq_##suffix) \\\n    inline _Tpvec operator ~ (const _Tpvec& a) \\\n    { \\\n        return _Tpvec(vreinterpretq_##suffix##_u8(vmvnq_u8(vreinterpretq_u8_##suffix(a.val)))); \\\n    }\n\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint8x16, u8)\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int8x16, s8)\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint16x8, u16)\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int16x8, s16)\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint32x4, u32)\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int32x4, s32)\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint64x2, u64)\nOPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int64x2, s64)\n\n#define OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(bin_op, intrin) \\\ninline v_float32x4 operator bin_op (const v_float32x4& a, const v_float32x4& b) \\\n{ \\\n    return v_float32x4(vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val)))); \\\n} \\\ninline v_float32x4& operator bin_op##= (v_float32x4& a, const v_float32x4& b) \\\n{ \\\n    a.val = vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val))); \\\n    return a; \\\n}\n\nOPENCV_HAL_IMPL_NEON_FLT_BIT_OP(&, vandq_s32)\nOPENCV_HAL_IMPL_NEON_FLT_BIT_OP(|, vorrq_s32)\nOPENCV_HAL_IMPL_NEON_FLT_BIT_OP(^, veorq_s32)\n\ninline v_float32x4 operator ~ (const v_float32x4& a)\n{\n    return v_float32x4(vreinterpretq_f32_s32(vmvnq_s32(vreinterpretq_s32_f32(a.val))));\n}\n\ninline v_float32x4 v_sqrt(const v_float32x4& x)\n{\n    float32x4_t x1 = vmaxq_f32(x.val, vdupq_n_f32(FLT_MIN));\n    float32x4_t e = vrsqrteq_f32(x1);\n    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e);\n    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e);\n    return v_float32x4(vmulq_f32(x.val, e));\n}\n\ninline v_float32x4 v_invsqrt(const v_float32x4& x)\n{\n    float32x4_t e = vrsqrteq_f32(x.val);\n    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e);\n    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e);\n    return v_float32x4(e);\n}\n\ninline v_float32x4 v_abs(v_float32x4 x)\n{ return v_float32x4(vabsq_f32(x.val)); }\n\n// TODO: exp, log, sin, cos\n\n#define OPENCV_HAL_IMPL_NEON_BIN_FUNC(_Tpvec, func, intrin) \\\ninline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    return _Tpvec(intrin(a.val, b.val)); \\\n}\n\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_min, vminq_u8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_max, vmaxq_u8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_min, vminq_s8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_max, vmaxq_s8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_min, vminq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_max, 
vmaxq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_min, vminq_s16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_max, vmaxq_s16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_min, vminq_u32)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_max, vmaxq_u32)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_min, vminq_s32)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_max, vmaxq_s32)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_min, vminq_f32)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_max, vmaxq_f32)\n\n\n#define OPENCV_HAL_IMPL_NEON_INT_CMP_OP(_Tpvec, cast, suffix, not_suffix) \\\ninline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(cast(vceqq_##suffix(a.val, b.val))); } \\\ninline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(cast(vmvnq_##not_suffix(vceqq_##suffix(a.val, b.val)))); } \\\ninline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(cast(vcltq_##suffix(a.val, b.val))); } \\\ninline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(cast(vcgtq_##suffix(a.val, b.val))); } \\\ninline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(cast(vcleq_##suffix(a.val, b.val))); } \\\ninline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(cast(vcgeq_##suffix(a.val, b.val))); }\n\nOPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint8x16, OPENCV_HAL_NOP, u8, u8)\nOPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int8x16, vreinterpretq_s8_u8, s8, u8)\nOPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint16x8, OPENCV_HAL_NOP, u16, u16)\nOPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int16x8, vreinterpretq_s16_u16, s16, u16)\nOPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint32x4, OPENCV_HAL_NOP, u32, u32)\nOPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int32x4, vreinterpretq_s32_u32, s32, u32)\nOPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float32x4, vreinterpretq_f32_u32, f32, u32)\n\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_add_wrap, vaddq_u8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_add_wrap, vaddq_s8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_add_wrap, vaddq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_add_wrap, vaddq_s16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_sub_wrap, vsubq_u8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_sub_wrap, vsubq_s8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_sub_wrap, vsubq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_sub_wrap, vsubq_s16)\n\n// TODO: absdiff for signed integers\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_absdiff, vabdq_u8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_absdiff, vabdq_u16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_absdiff, vabdq_u32)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_absdiff, vabdq_f32)\n\n#define OPENCV_HAL_IMPL_NEON_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \\\ninline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    return _Tpvec2(cast(intrin(a.val, b.val))); \\\n}\n\nOPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int8x16, v_uint8x16, vreinterpretq_u8_s8, v_absdiff, vabdq_s8)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int16x8, v_uint16x8, vreinterpretq_u16_s16, v_absdiff, vabdq_s16)\nOPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int32x4, v_uint32x4, vreinterpretq_u32_s32, v_absdiff, vabdq_s32)\n\ninline v_float32x4 v_magnitude(const v_float32x4& a, const v_float32x4& b)\n{\n    v_float32x4 x(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val));\n    return v_sqrt(x);\n}\n\ninline v_float32x4 v_sqr_magnitude(const v_float32x4& a, const v_float32x4& b)\n{\n    return v_float32x4(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val));\n}\n\ninline v_float32x4 
v_muladd(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c)\n{\n    return v_float32x4(vmlaq_f32(c.val, a.val, b.val));\n}\n\n// trade efficiency for convenience\n#define OPENCV_HAL_IMPL_NEON_SHIFT_OP(_Tpvec, suffix, _Tps, ssuffix) \\\ninline _Tpvec operator << (const _Tpvec& a, int n) \\\n{ return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)n))); } \\\ninline _Tpvec operator >> (const _Tpvec& a, int n) \\\n{ return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)-n))); } \\\ntemplate<int n> inline _Tpvec v_shl(const _Tpvec& a) \\\n{ return _Tpvec(vshlq_n_##suffix(a.val, n)); } \\\ntemplate<int n> inline _Tpvec v_shr(const _Tpvec& a) \\\n{ return _Tpvec(vshrq_n_##suffix(a.val, n)); } \\\ntemplate<int n> inline _Tpvec v_rshr(const _Tpvec& a) \\\n{ return _Tpvec(vrshrq_n_##suffix(a.val, n)); }\n\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint8x16, u8, schar, s8)\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int8x16, s8, schar, s8)\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint16x8, u16, short, s16)\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int16x8, s16, short, s16)\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint32x4, u32, int, s32)\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int32x4, s32, int, s32)\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint64x2, u64, int64, s64)\nOPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int64x2, s64, int64, s64)\n\n#define OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(_Tpvec, _Tp, suffix) \\\ninline _Tpvec v_load(const _Tp* ptr) \\\n{ return _Tpvec(vld1q_##suffix(ptr)); } \\\ninline _Tpvec v_load_aligned(const _Tp* ptr) \\\n{ return _Tpvec(vld1q_##suffix(ptr)); } \\\ninline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \\\n{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr0), vld1_##suffix(ptr1))); } \\\ninline void v_store(_Tp* ptr, const _Tpvec& a) \\\n{ vst1q_##suffix(ptr, a.val); } \\\ninline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \\\n{ vst1q_##suffix(ptr, a.val); } \\\ninline void v_store_low(_Tp* ptr, const _Tpvec& a) \\\n{ vst1_##suffix(ptr, vget_low_##suffix(a.val)); } \\\ninline void v_store_high(_Tp* ptr, const _Tpvec& a) \\\n{ vst1_##suffix(ptr, vget_high_##suffix(a.val)); }\n\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint8x16, uchar, u8)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int8x16, schar, s8)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint16x8, ushort, u16)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int16x8, short, s16)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint32x4, unsigned, u32)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int32x4, int, s32)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint64x2, uint64, u64)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int64x2, int64, s64)\nOPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32)\n\n#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, scalartype, func, scalar_func) \\\ninline scalartype v_reduce_##func(const _Tpvec& a) \\\n{ \\\n    scalartype CV_DECL_ALIGNED(16) buf[4]; \\\n    v_store_aligned(buf, a); \\\n    scalartype s0 = scalar_func(buf[0], buf[1]); \\\n    scalartype s1 = scalar_func(buf[2], buf[3]); \\\n    return scalar_func(s0, s1); \\\n}\n\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, unsigned, sum, OPENCV_HAL_ADD)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, unsigned, max, std::max)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, unsigned, min, std::min)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int, sum, OPENCV_HAL_ADD)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int, max, std::max)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int, min, std::min)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float, sum, OPENCV_HAL_ADD)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float, 
max, std::max)\nOPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float, min, std::min)\n\ninline int v_signmask(const v_uint8x16& a)\n{\n    int8x8_t m0 = vcreate_s8(CV_BIG_UINT(0x0706050403020100));\n    uint8x16_t v0 = vshlq_u8(vshrq_n_u8(a.val, 7), vcombine_s8(m0, m0));\n    uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0)));\n    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8);\n}\ninline int v_signmask(const v_int8x16& a)\n{ return v_signmask(v_reinterpret_as_u8(a)); }\n\ninline int v_signmask(const v_uint16x8& a)\n{\n    int16x4_t m0 = vcreate_s16(CV_BIG_UINT(0x0003000200010000));\n    uint16x8_t v0 = vshlq_u16(vshrq_n_u16(a.val, 15), vcombine_s16(m0, m0));\n    uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(v0));\n    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 4);\n}\ninline int v_signmask(const v_int16x8& a)\n{ return v_signmask(v_reinterpret_as_u16(a)); }\n\ninline int v_signmask(const v_uint32x4& a)\n{\n    int32x2_t m0 = vcreate_s32(CV_BIG_UINT(0x0000000100000000));\n    uint32x4_t v0 = vshlq_u32(vshrq_n_u32(a.val, 31), vcombine_s32(m0, m0));\n    uint64x2_t v1 = vpaddlq_u32(v0);\n    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 2);\n}\ninline int v_signmask(const v_int32x4& a)\n{ return v_signmask(v_reinterpret_as_u32(a)); }\ninline int v_signmask(const v_float32x4& a)\n{ return v_signmask(v_reinterpret_as_u32(a)); }\n\n#define OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(_Tpvec, suffix, shift) \\\ninline bool v_check_all(const v_##_Tpvec& a) \\\n{ \\\n    _Tpvec##_t v0 = vshrq_n_##suffix(vmvnq_##suffix(a.val), shift); \\\n    uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \\\n    return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) == 0; \\\n} \\\ninline bool v_check_any(const v_##_Tpvec& a) \\\n{ \\\n    _Tpvec##_t v0 = vshrq_n_##suffix(a.val, shift); \\\n    uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \\\n    return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) != 0; \\\n}\n\nOPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint8x16, u8, 7)\nOPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint16x8, u16, 15)\nOPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint32x4, u32, 31)\n\ninline bool v_check_all(const v_int8x16& a)\n{ return v_check_all(v_reinterpret_as_u8(a)); }\ninline bool v_check_all(const v_int16x8& a)\n{ return v_check_all(v_reinterpret_as_u16(a)); }\ninline bool v_check_all(const v_int32x4& a)\n{ return v_check_all(v_reinterpret_as_u32(a)); }\ninline bool v_check_all(const v_float32x4& a)\n{ return v_check_all(v_reinterpret_as_u32(a)); }\n\ninline bool v_check_any(const v_int8x16& a)\n{ return v_check_any(v_reinterpret_as_u8(a)); }\ninline bool v_check_any(const v_int16x8& a)\n{ return v_check_any(v_reinterpret_as_u16(a)); }\ninline bool v_check_any(const v_int32x4& a)\n{ return v_check_any(v_reinterpret_as_u32(a)); }\ninline bool v_check_any(const v_float32x4& a)\n{ return v_check_any(v_reinterpret_as_u32(a)); }\n\n#define OPENCV_HAL_IMPL_NEON_SELECT(_Tpvec, suffix, usuffix) \\\ninline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    return _Tpvec(vbslq_##suffix(vreinterpretq_##usuffix##_##suffix(mask.val), a.val, b.val)); \\\n}\n\nOPENCV_HAL_IMPL_NEON_SELECT(v_uint8x16, u8, u8)\nOPENCV_HAL_IMPL_NEON_SELECT(v_int8x16, s8, u8)\nOPENCV_HAL_IMPL_NEON_SELECT(v_uint16x8, u16, u16)\nOPENCV_HAL_IMPL_NEON_SELECT(v_int16x8, s16, u16)\nOPENCV_HAL_IMPL_NEON_SELECT(v_uint32x4, u32, u32)\nOPENCV_HAL_IMPL_NEON_SELECT(v_int32x4, s32, u32)\nOPENCV_HAL_IMPL_NEON_SELECT(v_float32x4, f32, u32)\n\n#define 
OPENCV_HAL_IMPL_NEON_EXPAND(_Tpvec, _Tpwvec, _Tp, suffix) \\\ninline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \\\n{ \\\n    b0.val = vmovl_##suffix(vget_low_##suffix(a.val)); \\\n    b1.val = vmovl_##suffix(vget_high_##suffix(a.val)); \\\n} \\\ninline _Tpwvec v_load_expand(const _Tp* ptr) \\\n{ \\\n    return _Tpwvec(vmovl_##suffix(vld1_##suffix(ptr))); \\\n}\n\nOPENCV_HAL_IMPL_NEON_EXPAND(v_uint8x16, v_uint16x8, uchar, u8)\nOPENCV_HAL_IMPL_NEON_EXPAND(v_int8x16, v_int16x8, schar, s8)\nOPENCV_HAL_IMPL_NEON_EXPAND(v_uint16x8, v_uint32x4, ushort, u16)\nOPENCV_HAL_IMPL_NEON_EXPAND(v_int16x8, v_int32x4, short, s16)\nOPENCV_HAL_IMPL_NEON_EXPAND(v_uint32x4, v_uint64x2, uint, u32)\nOPENCV_HAL_IMPL_NEON_EXPAND(v_int32x4, v_int64x2, int, s32)\n\ninline v_uint32x4 v_load_expand_q(const uchar* ptr)\n{\n    uint8x8_t v0 = vcreate_u8(*(unsigned*)ptr);\n    uint16x4_t v1 = vget_low_u16(vmovl_u8(v0));\n    return v_uint32x4(vmovl_u16(v1));\n}\n\ninline v_int32x4 v_load_expand_q(const schar* ptr)\n{\n    int8x8_t v0 = vcreate_s8(*(unsigned*)ptr);\n    int16x4_t v1 = vget_low_s16(vmovl_s8(v0));\n    return v_int32x4(vmovl_s16(v1));\n}\n\n#define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \\\ninline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \\\n{ \\\n    _Tpvec##x2_t p = vzipq_##suffix(a0.val, a1.val); \\\n    b0.val = p.val[0]; \\\n    b1.val = p.val[1]; \\\n} \\\ninline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \\\n{ \\\n    return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \\\n} \\\ninline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \\\n{ \\\n    return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \\\n} \\\ninline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \\\n{ \\\n    c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \\\n    d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \\\n}\n\nOPENCV_HAL_IMPL_NEON_UNPACKS(uint8x16, u8)\nOPENCV_HAL_IMPL_NEON_UNPACKS(int8x16, s8)\nOPENCV_HAL_IMPL_NEON_UNPACKS(uint16x8, u16)\nOPENCV_HAL_IMPL_NEON_UNPACKS(int16x8, s16)\nOPENCV_HAL_IMPL_NEON_UNPACKS(uint32x4, u32)\nOPENCV_HAL_IMPL_NEON_UNPACKS(int32x4, s32)\nOPENCV_HAL_IMPL_NEON_UNPACKS(float32x4, f32)\n\n#define OPENCV_HAL_IMPL_NEON_EXTRACT(_Tpvec, suffix) \\\ntemplate <int s> \\\ninline v_##_Tpvec v_extract(const v_##_Tpvec& a, const v_##_Tpvec& b) \\\n{ \\\n    return v_##_Tpvec(vextq_##suffix(a.val, b.val, s)); \\\n}\n\nOPENCV_HAL_IMPL_NEON_EXTRACT(uint8x16, u8)\nOPENCV_HAL_IMPL_NEON_EXTRACT(int8x16, s8)\nOPENCV_HAL_IMPL_NEON_EXTRACT(uint16x8, u16)\nOPENCV_HAL_IMPL_NEON_EXTRACT(int16x8, s16)\nOPENCV_HAL_IMPL_NEON_EXTRACT(uint32x4, u32)\nOPENCV_HAL_IMPL_NEON_EXTRACT(int32x4, s32)\nOPENCV_HAL_IMPL_NEON_EXTRACT(uint64x2, u64)\nOPENCV_HAL_IMPL_NEON_EXTRACT(int64x2, s64)\nOPENCV_HAL_IMPL_NEON_EXTRACT(float32x4, f32)\n\ninline v_int32x4 v_round(const v_float32x4& a)\n{\n    static const int32x4_t v_sign = vdupq_n_s32(1 << 31),\n        v_05 = vreinterpretq_s32_f32(vdupq_n_f32(0.5f));\n\n    int32x4_t v_addition = vorrq_s32(v_05, vandq_s32(v_sign, vreinterpretq_s32_f32(a.val)));\n    return v_int32x4(vcvtq_s32_f32(vaddq_f32(a.val, vreinterpretq_f32_s32(v_addition))));\n}\n\ninline v_int32x4 v_floor(const v_float32x4& a)\n{\n    int32x4_t a1 = vcvtq_s32_f32(a.val);\n    uint32x4_t mask = vcgtq_f32(vcvtq_f32_s32(a1), a.val);\n    
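// vcvtq_s32_f32 truncates toward zero, so a1 is one too large wherever float(a1) > a\n    // (i.e. for negative non-integers); those mask lanes are all ones (-1 as signed), and\n    // adding the mask corrects a1 down to floor(a).\n    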
return v_int32x4(vaddq_s32(a1, vreinterpretq_s32_u32(mask)));\n}\n\ninline v_int32x4 v_ceil(const v_float32x4& a)\n{\n    int32x4_t a1 = vcvtq_s32_f32(a.val);\n    uint32x4_t mask = vcgtq_f32(a.val, vcvtq_f32_s32(a1));\n    return v_int32x4(vsubq_s32(a1, vreinterpretq_s32_u32(mask)));\n}\n\ninline v_int32x4 v_trunc(const v_float32x4& a)\n{ return v_int32x4(vcvtq_s32_f32(a.val)); }\n\n#define OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(_Tpvec, suffix) \\\ninline void v_transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \\\n                         const v_##_Tpvec& a2, const v_##_Tpvec& a3, \\\n                         v_##_Tpvec& b0, v_##_Tpvec& b1, \\\n                         v_##_Tpvec& b2, v_##_Tpvec& b3) \\\n{ \\\n    /* m00 m01 m02 m03 */ \\\n    /* m10 m11 m12 m13 */ \\\n    /* m20 m21 m22 m23 */ \\\n    /* m30 m31 m32 m33 */ \\\n    _Tpvec##x2_t t0 = vtrnq_##suffix(a0.val, a1.val); \\\n    _Tpvec##x2_t t1 = vtrnq_##suffix(a2.val, a3.val); \\\n    /* m00 m10 m02 m12 */ \\\n    /* m01 m11 m03 m13 */ \\\n    /* m20 m30 m22 m32 */ \\\n    /* m21 m31 m23 m33 */ \\\n    b0.val = vcombine_##suffix(vget_low_##suffix(t0.val[0]), vget_low_##suffix(t1.val[0])); \\\n    b1.val = vcombine_##suffix(vget_low_##suffix(t0.val[1]), vget_low_##suffix(t1.val[1])); \\\n    b2.val = vcombine_##suffix(vget_high_##suffix(t0.val[0]), vget_high_##suffix(t1.val[0])); \\\n    b3.val = vcombine_##suffix(vget_high_##suffix(t0.val[1]), vget_high_##suffix(t1.val[1])); \\\n}\n\nOPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(uint32x4, u32)\nOPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(int32x4, s32)\nOPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(float32x4, f32)\n\n#define OPENCV_HAL_IMPL_NEON_INTERLEAVED(_Tpvec, _Tp, suffix) \\\ninline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, v_##_Tpvec& c) \\\n{ \\\n    _Tpvec##x3_t v = vld3q_##suffix(ptr); \\\n    a.val = v.val[0]; \\\n    b.val = v.val[1]; \\\n    c.val = v.val[2]; \\\n} \\\ninline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, \\\n                                v_##_Tpvec& c, v_##_Tpvec& d) \\\n{ \\\n    _Tpvec##x4_t v = vld4q_##suffix(ptr); \\\n    a.val = v.val[0]; \\\n    b.val = v.val[1]; \\\n    c.val = v.val[2]; \\\n    d.val = v.val[3]; \\\n} \\\ninline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, const v_##_Tpvec& c) \\\n{ \\\n    _Tpvec##x3_t v; \\\n    v.val[0] = a.val; \\\n    v.val[1] = b.val; \\\n    v.val[2] = c.val; \\\n    vst3q_##suffix(ptr, v); \\\n} \\\ninline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \\\n                               const v_##_Tpvec& c, const v_##_Tpvec& d) \\\n{ \\\n    _Tpvec##x4_t v; \\\n    v.val[0] = a.val; \\\n    v.val[1] = b.val; \\\n    v.val[2] = c.val; \\\n    v.val[3] = d.val; \\\n    vst4q_##suffix(ptr, v); \\\n}\n\nOPENCV_HAL_IMPL_NEON_INTERLEAVED(uint8x16, uchar, u8)\nOPENCV_HAL_IMPL_NEON_INTERLEAVED(int8x16, schar, s8)\nOPENCV_HAL_IMPL_NEON_INTERLEAVED(uint16x8, ushort, u16)\nOPENCV_HAL_IMPL_NEON_INTERLEAVED(int16x8, short, s16)\nOPENCV_HAL_IMPL_NEON_INTERLEAVED(uint32x4, unsigned, u32)\nOPENCV_HAL_IMPL_NEON_INTERLEAVED(int32x4, int, s32)\nOPENCV_HAL_IMPL_NEON_INTERLEAVED(float32x4, float, f32)\n\ninline v_float32x4 v_cvt_f32(const v_int32x4& a)\n{\n    return v_float32x4(vcvtq_f32_s32(a.val));\n}\n\n//! @endcond\n\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/hal/intrin_sse.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HAL_SSE_HPP__\n#define __OPENCV_HAL_SSE_HPP__\n\n#include <algorithm>\n\n#define CV_SIMD128 1\n#define CV_SIMD128_64F 1\n\nnamespace cv\n{\n\n//! 
@cond IGNORED\n\nstruct v_uint8x16\n{\n    typedef uchar lane_type;\n    enum { nlanes = 16 };\n\n    v_uint8x16() {}\n    explicit v_uint8x16(__m128i v) : val(v) {}\n    v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,\n               uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)\n    {\n        val = _mm_setr_epi8((char)v0, (char)v1, (char)v2, (char)v3,\n                            (char)v4, (char)v5, (char)v6, (char)v7,\n                            (char)v8, (char)v9, (char)v10, (char)v11,\n                            (char)v12, (char)v13, (char)v14, (char)v15);\n    }\n    uchar get0() const\n    {\n        return (uchar)_mm_cvtsi128_si32(val);\n    }\n\n    __m128i val;\n};\n\nstruct v_int8x16\n{\n    typedef schar lane_type;\n    enum { nlanes = 16 };\n\n    v_int8x16() {}\n    explicit v_int8x16(__m128i v) : val(v) {}\n    v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,\n              schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)\n    {\n        val = _mm_setr_epi8((char)v0, (char)v1, (char)v2, (char)v3,\n                            (char)v4, (char)v5, (char)v6, (char)v7,\n                            (char)v8, (char)v9, (char)v10, (char)v11,\n                            (char)v12, (char)v13, (char)v14, (char)v15);\n    }\n    schar get0() const\n    {\n        return (schar)_mm_cvtsi128_si32(val);\n    }\n\n    __m128i val;\n};\n\nstruct v_uint16x8\n{\n    typedef ushort lane_type;\n    enum { nlanes = 8 };\n\n    v_uint16x8() {}\n    explicit v_uint16x8(__m128i v) : val(v) {}\n    v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)\n    {\n        val = _mm_setr_epi16((short)v0, (short)v1, (short)v2, (short)v3,\n                             (short)v4, (short)v5, (short)v6, (short)v7);\n    }\n    ushort get0() const\n    {\n        return (ushort)_mm_cvtsi128_si32(val);\n    }\n\n    __m128i val;\n};\n\nstruct v_int16x8\n{\n    typedef short lane_type;\n    enum { nlanes = 8 };\n\n    v_int16x8() {}\n    explicit v_int16x8(__m128i v) : val(v) {}\n    v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)\n    {\n        val = _mm_setr_epi16((short)v0, (short)v1, (short)v2, (short)v3,\n                             (short)v4, (short)v5, (short)v6, (short)v7);\n    }\n    short get0() const\n    {\n        return (short)_mm_cvtsi128_si32(val);\n    }\n    __m128i val;\n};\n\nstruct v_uint32x4\n{\n    typedef unsigned lane_type;\n    enum { nlanes = 4 };\n\n    v_uint32x4() {}\n    explicit v_uint32x4(__m128i v) : val(v) {}\n    v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3)\n    {\n        val = _mm_setr_epi32((int)v0, (int)v1, (int)v2, (int)v3);\n    }\n    unsigned get0() const\n    {\n        return (unsigned)_mm_cvtsi128_si32(val);\n    }\n    __m128i val;\n};\n\nstruct v_int32x4\n{\n    typedef int lane_type;\n    enum { nlanes = 4 };\n\n    v_int32x4() {}\n    explicit v_int32x4(__m128i v) : val(v) {}\n    v_int32x4(int v0, int v1, int v2, int v3)\n    {\n        val = _mm_setr_epi32(v0, v1, v2, v3);\n    }\n    int get0() const\n    {\n        return _mm_cvtsi128_si32(val);\n    }\n    __m128i val;\n};\n\nstruct v_float32x4\n{\n    typedef float lane_type;\n    enum { nlanes = 4 };\n\n    v_float32x4() {}\n    explicit v_float32x4(__m128 v) : val(v) {}\n    v_float32x4(float v0, float v1, float v2, float v3)\n    {\n     
   val = _mm_setr_ps(v0, v1, v2, v3);\n    }\n    float get0() const\n    {\n        return _mm_cvtss_f32(val);\n    }\n    __m128 val;\n};\n\nstruct v_uint64x2\n{\n    typedef uint64 lane_type;\n    enum { nlanes = 2 };\n\n    v_uint64x2() {}\n    explicit v_uint64x2(__m128i v) : val(v) {}\n    v_uint64x2(uint64 v0, uint64 v1)\n    {\n        val = _mm_setr_epi32((int)v0, (int)(v0 >> 32), (int)v1, (int)(v1 >> 32));\n    }\n    uint64 get0() const\n    {\n        int a = _mm_cvtsi128_si32(val);\n        int b = _mm_cvtsi128_si32(_mm_srli_epi64(val, 32));\n        return (unsigned)a | ((uint64)(unsigned)b << 32);\n    }\n    __m128i val;\n};\n\nstruct v_int64x2\n{\n    typedef int64 lane_type;\n    enum { nlanes = 2 };\n\n    v_int64x2() {}\n    explicit v_int64x2(__m128i v) : val(v) {}\n    v_int64x2(int64 v0, int64 v1)\n    {\n        val = _mm_setr_epi32((int)v0, (int)(v0 >> 32), (int)v1, (int)(v1 >> 32));\n    }\n    int64 get0() const\n    {\n        int a = _mm_cvtsi128_si32(val);\n        int b = _mm_cvtsi128_si32(_mm_srli_epi64(val, 32));\n        return (int64)((unsigned)a | ((uint64)(unsigned)b << 32));\n    }\n    __m128i val;\n};\n\nstruct v_float64x2\n{\n    typedef double lane_type;\n    enum { nlanes = 2 };\n\n    v_float64x2() {}\n    explicit v_float64x2(__m128d v) : val(v) {}\n    v_float64x2(double v0, double v1)\n    {\n        val = _mm_setr_pd(v0, v1);\n    }\n    double get0() const\n    {\n        return _mm_cvtsd_f64(val);\n    }\n    __m128d val;\n};\n\n#define OPENCV_HAL_IMPL_SSE_INITVEC(_Tpvec, _Tp, suffix, zsuffix, ssuffix, _Tps, cast) \\\ninline _Tpvec v_setzero_##suffix() { return _Tpvec(_mm_setzero_##zsuffix()); } \\\ninline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(_mm_set1_##ssuffix((_Tps)v)); } \\\ntemplate<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \\\n{ return _Tpvec(cast(a.val)); }\n\nOPENCV_HAL_IMPL_SSE_INITVEC(v_uint8x16, uchar, u8, si128, epi8, char, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_INITVEC(v_int8x16, schar, s8, si128, epi8, char, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_INITVEC(v_uint16x8, ushort, u16, si128, epi16, short, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_INITVEC(v_int16x8, short, s16, si128, epi16, short, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_INITVEC(v_uint32x4, unsigned, u32, si128, epi32, int, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_INITVEC(v_int32x4, int, s32, si128, epi32, int, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_INITVEC(v_float32x4, float, f32, ps, ps, float, _mm_castsi128_ps)\nOPENCV_HAL_IMPL_SSE_INITVEC(v_float64x2, double, f64, pd, pd, double, _mm_castsi128_pd)\n\ninline v_uint64x2 v_setzero_u64() { return v_uint64x2(_mm_setzero_si128()); }\ninline v_int64x2 v_setzero_s64() { return v_int64x2(_mm_setzero_si128()); }\ninline v_uint64x2 v_setall_u64(uint64 val) { return v_uint64x2(val, val); }\ninline v_int64x2 v_setall_s64(int64 val) { return v_int64x2(val, val); }\n\ntemplate<typename _Tpvec> inline\nv_uint64x2 v_reinterpret_as_u64(const _Tpvec& a) { return v_uint64x2(a.val); }\ntemplate<typename _Tpvec> inline\nv_int64x2 v_reinterpret_as_s64(const _Tpvec& a) { return v_int64x2(a.val); }\ninline v_float32x4 v_reinterpret_as_f32(const v_uint64x2& a)\n{ return v_float32x4(_mm_castsi128_ps(a.val)); }\ninline v_float32x4 v_reinterpret_as_f32(const v_int64x2& a)\n{ return v_float32x4(_mm_castsi128_ps(a.val)); }\ninline v_float64x2 v_reinterpret_as_f64(const v_uint64x2& a)\n{ return v_float64x2(_mm_castsi128_pd(a.val)); }\ninline v_float64x2 v_reinterpret_as_f64(const v_int64x2& a)\n{ return 
v_float64x2(_mm_castsi128_pd(a.val)); }\n\n#define OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(_Tpvec, suffix) \\\ninline _Tpvec v_reinterpret_as_##suffix(const v_float32x4& a) \\\n{ return _Tpvec(_mm_castps_si128(a.val)); } \\\ninline _Tpvec v_reinterpret_as_##suffix(const v_float64x2& a) \\\n{ return _Tpvec(_mm_castpd_si128(a.val)); }\n\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint8x16, u8)\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int8x16, s8)\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint16x8, u16)\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int16x8, s16)\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint32x4, u32)\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int32x4, s32)\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint64x2, u64)\nOPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int64x2, s64)\n\ninline v_float32x4 v_reinterpret_as_f32(const v_float32x4& a) {return a; }\ninline v_float64x2 v_reinterpret_as_f64(const v_float64x2& a) {return a; }\ninline v_float32x4 v_reinterpret_as_f32(const v_float64x2& a) {return v_float32x4(_mm_castpd_ps(a.val)); }\ninline v_float64x2 v_reinterpret_as_f64(const v_float32x4& a) {return v_float64x2(_mm_castps_pd(a.val)); }\n\n//////////////// PACK ///////////////\ninline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b)\n{\n    __m128i delta = _mm_set1_epi16(255);\n    return v_uint8x16(_mm_packus_epi16(_mm_subs_epu16(a.val, _mm_subs_epu16(a.val, delta)),\n                                       _mm_subs_epu16(b.val, _mm_subs_epu16(b.val, delta))));\n}\n\ninline void v_pack_store(uchar* ptr, const v_uint16x8& a)\n{\n    __m128i delta = _mm_set1_epi16(255);\n    __m128i a1 = _mm_subs_epu16(a.val, _mm_subs_epu16(a.val, delta));\n    _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a1, a1));\n}\n\ninline v_uint8x16 v_pack_u(const v_int16x8& a, const v_int16x8& b)\n{ return v_uint8x16(_mm_packus_epi16(a.val, b.val)); }\n\ninline void v_pack_u_store(uchar* ptr, const v_int16x8& a)\n{ _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a.val, a.val)); }\n\ntemplate<int n> inline\nv_uint8x16 v_rshr_pack(const v_uint16x8& a, const v_uint16x8& b)\n{\n    // we assume that n > 0, and so the shifted 16-bit values can be treated as signed numbers.\n    __m128i delta = _mm_set1_epi16((short)(1 << (n-1)));\n    return v_uint8x16(_mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(a.val, delta), n),\n                                       _mm_srli_epi16(_mm_adds_epu16(b.val, delta), n)));\n}\n\ntemplate<int n> inline\nvoid v_rshr_pack_store(uchar* ptr, const v_uint16x8& a)\n{\n    __m128i delta = _mm_set1_epi16((short)(1 << (n-1)));\n    __m128i a1 = _mm_srli_epi16(_mm_adds_epu16(a.val, delta), n);\n    _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a1, a1));\n}\n\ntemplate<int n> inline\nv_uint8x16 v_rshr_pack_u(const v_int16x8& a, const v_int16x8& b)\n{\n    __m128i delta = _mm_set1_epi16((short)(1 << (n-1)));\n    return v_uint8x16(_mm_packus_epi16(_mm_srai_epi16(_mm_adds_epi16(a.val, delta), n),\n                                       _mm_srai_epi16(_mm_adds_epi16(b.val, delta), n)));\n}\n\ntemplate<int n> inline\nvoid v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a)\n{\n    __m128i delta = _mm_set1_epi16((short)(1 << (n-1)));\n    __m128i a1 = _mm_srai_epi16(_mm_adds_epi16(a.val, delta), n);\n    _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a1, a1));\n}\n\ninline v_int8x16 v_pack(const v_int16x8& a, const v_int16x8& b)\n{ return v_int8x16(_mm_packs_epi16(a.val, b.val)); }\n\ninline void v_pack_store(schar* ptr, const v_int16x8& a)\n{ _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi16(a.val, a.val)); }\n\ntemplate<int n> 
inline\nv_int8x16 v_rshr_pack(const v_int16x8& a, const v_int16x8& b)\n{\n    // we assume that n > 0, and so the shifted 16-bit values can be treated as signed numbers.\n    __m128i delta = _mm_set1_epi16((short)(1 << (n-1)));\n    return v_int8x16(_mm_packs_epi16(_mm_srai_epi16(_mm_adds_epi16(a.val, delta), n),\n                                     _mm_srai_epi16(_mm_adds_epi16(b.val, delta), n)));\n}\ntemplate<int n> inline\nvoid v_rshr_pack_store(schar* ptr, const v_int16x8& a)\n{\n    // we assume that n > 0, and so the shifted 16-bit values can be treated as signed numbers.\n    __m128i delta = _mm_set1_epi16((short)(1 << (n-1)));\n    __m128i a1 = _mm_srai_epi16(_mm_adds_epi16(a.val, delta), n);\n    _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi16(a1, a1));\n}\n\n\n// bit-wise \"mask ? a : b\"\ninline __m128i v_select_si128(__m128i mask, __m128i a, __m128i b)\n{\n    return _mm_xor_si128(b, _mm_and_si128(_mm_xor_si128(a, b), mask));\n}\n\ninline v_uint16x8 v_pack(const v_uint32x4& a, const v_uint32x4& b)\n{\n    __m128i z = _mm_setzero_si128(), maxval32 = _mm_set1_epi32(65535), delta32 = _mm_set1_epi32(32768);\n    __m128i a1 = _mm_sub_epi32(v_select_si128(_mm_cmpgt_epi32(z, a.val), maxval32, a.val), delta32);\n    __m128i b1 = _mm_sub_epi32(v_select_si128(_mm_cmpgt_epi32(z, b.val), maxval32, b.val), delta32);\n    __m128i r = _mm_packs_epi32(a1, b1);\n    return v_uint16x8(_mm_sub_epi16(r, _mm_set1_epi16(-32768)));\n}\n\ninline void v_pack_store(ushort* ptr, const v_uint32x4& a)\n{\n    __m128i z = _mm_setzero_si128(), maxval32 = _mm_set1_epi32(65535), delta32 = _mm_set1_epi32(32768);\n    __m128i a1 = _mm_sub_epi32(v_select_si128(_mm_cmpgt_epi32(z, a.val), maxval32, a.val), delta32);\n    __m128i r = _mm_packs_epi32(a1, a1);\n    _mm_storel_epi64((__m128i*)ptr, _mm_sub_epi16(r, _mm_set1_epi16(-32768)));\n}\n\ntemplate<int n> inline\nv_uint16x8 v_rshr_pack(const v_uint32x4& a, const v_uint32x4& b)\n{\n    __m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768);\n    __m128i a1 = _mm_sub_epi32(_mm_srli_epi32(_mm_add_epi32(a.val, delta), n), delta32);\n    __m128i b1 = _mm_sub_epi32(_mm_srli_epi32(_mm_add_epi32(b.val, delta), n), delta32);\n    return v_uint16x8(_mm_sub_epi16(_mm_packs_epi32(a1, b1), _mm_set1_epi16(-32768)));\n}\n\ntemplate<int n> inline\nvoid v_rshr_pack_store(ushort* ptr, const v_uint32x4& a)\n{\n    __m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768);\n    __m128i a1 = _mm_sub_epi32(_mm_srli_epi32(_mm_add_epi32(a.val, delta), n), delta32);\n    __m128i a2 = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768));\n    _mm_storel_epi64((__m128i*)ptr, a2);\n}\n\ninline v_uint16x8 v_pack_u(const v_int32x4& a, const v_int32x4& b)\n{\n    __m128i delta32 = _mm_set1_epi32(32768);\n    __m128i r = _mm_packs_epi32(_mm_sub_epi32(a.val, delta32), _mm_sub_epi32(b.val, delta32));\n    return v_uint16x8(_mm_sub_epi16(r, _mm_set1_epi16(-32768)));\n}\n\ninline void v_pack_u_store(ushort* ptr, const v_int32x4& a)\n{\n    __m128i delta32 = _mm_set1_epi32(32768);\n    __m128i a1 = _mm_sub_epi32(a.val, delta32);\n    __m128i r = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768));\n    _mm_storel_epi64((__m128i*)ptr, r);\n}\n\ntemplate<int n> inline\nv_uint16x8 v_rshr_pack_u(const v_int32x4& a, const v_int32x4& b)\n{\n    __m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768);\n    __m128i a1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n), delta32);\n    __m128i a2 = 
_mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768));\n    __m128i b1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(b.val, delta), n), delta32);\n    __m128i b2 = _mm_sub_epi16(_mm_packs_epi32(b1, b1), _mm_set1_epi16(-32768));\n    return v_uint16x8(_mm_unpacklo_epi64(a2, b2));\n}\n\ntemplate<int n> inline\nvoid v_rshr_pack_u_store(ushort* ptr, const v_int32x4& a)\n{\n    __m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768);\n    __m128i a1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n), delta32);\n    __m128i a2 = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768));\n    _mm_storel_epi64((__m128i*)ptr, a2);\n}\n\ninline v_int16x8 v_pack(const v_int32x4& a, const v_int32x4& b)\n{ return v_int16x8(_mm_packs_epi32(a.val, b.val)); }\n\ninline void v_pack_store(short* ptr, const v_int32x4& a)\n{\n    _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi32(a.val, a.val));\n}\n\ntemplate<int n> inline\nv_int16x8 v_rshr_pack(const v_int32x4& a, const v_int32x4& b)\n{\n    __m128i delta = _mm_set1_epi32(1 << (n-1));\n    return v_int16x8(_mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n),\n                                     _mm_srai_epi32(_mm_add_epi32(b.val, delta), n)));\n}\n\ntemplate<int n> inline\nvoid v_rshr_pack_store(short* ptr, const v_int32x4& a)\n{\n    __m128i delta = _mm_set1_epi32(1 << (n-1));\n    __m128i a1 = _mm_srai_epi32(_mm_add_epi32(a.val, delta), n);\n    _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi32(a1, a1));\n}\n\n\n// [a0 0 | b0 0]  [a1 0 | b1 0]\ninline v_uint32x4 v_pack(const v_uint64x2& a, const v_uint64x2& b)\n{\n    __m128i v0 = _mm_unpacklo_epi32(a.val, b.val); // a0 a1 0 0\n    __m128i v1 = _mm_unpackhi_epi32(a.val, b.val); // b0 b1 0 0\n    return v_uint32x4(_mm_unpacklo_epi32(v0, v1));\n}\n\ninline void v_pack_store(unsigned* ptr, const v_uint64x2& a)\n{\n    __m128i a1 = _mm_shuffle_epi32(a.val, _MM_SHUFFLE(0, 2, 2, 0));\n    _mm_storel_epi64((__m128i*)ptr, a1);\n}\n\n// [a0 0 | b0 0]  [a1 0 | b1 0]\ninline v_int32x4 v_pack(const v_int64x2& a, const v_int64x2& b)\n{\n    __m128i v0 = _mm_unpacklo_epi32(a.val, b.val); // a0 a1 0 0\n    __m128i v1 = _mm_unpackhi_epi32(a.val, b.val); // b0 b1 0 0\n    return v_int32x4(_mm_unpacklo_epi32(v0, v1));\n}\n\ninline void v_pack_store(int* ptr, const v_int64x2& a)\n{\n    __m128i a1 = _mm_shuffle_epi32(a.val, _MM_SHUFFLE(0, 2, 2, 0));\n    _mm_storel_epi64((__m128i*)ptr, a1);\n}\n\ntemplate<int n> inline\nv_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b)\n{\n    uint64 delta = (uint64)1 << (n-1);\n    v_uint64x2 delta2(delta, delta);\n    __m128i a1 = _mm_srli_epi64(_mm_add_epi64(a.val, delta2.val), n);\n    __m128i b1 = _mm_srli_epi64(_mm_add_epi64(b.val, delta2.val), n);\n    __m128i v0 = _mm_unpacklo_epi32(a1, b1); // a0 a1 0 0\n    __m128i v1 = _mm_unpackhi_epi32(a1, b1); // b0 b1 0 0\n    return v_uint32x4(_mm_unpacklo_epi32(v0, v1));\n}\n\ntemplate<int n> inline\nvoid v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a)\n{\n    uint64 delta = (uint64)1 << (n-1);\n    v_uint64x2 delta2(delta, delta);\n    __m128i a1 = _mm_srli_epi64(_mm_add_epi64(a.val, delta2.val), n);\n    __m128i a2 = _mm_shuffle_epi32(a1, _MM_SHUFFLE(0, 2, 2, 0));\n    _mm_storel_epi64((__m128i*)ptr, a2);\n}\n\ninline __m128i v_sign_epi64(__m128i a)\n{\n    return _mm_shuffle_epi32(_mm_srai_epi32(a, 31), _MM_SHUFFLE(3, 3, 1, 1)); // x m0 | x m1\n}\n\ninline __m128i v_srai_epi64(__m128i a, int imm)\n{\n    __m128i smask = v_sign_epi64(a);\n    return 
_mm_xor_si128(_mm_srli_epi64(_mm_xor_si128(a, smask), imm), smask);\n}\n\ntemplate<int n> inline\nv_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b)\n{\n    int64 delta = (int64)1 << (n-1);\n    v_int64x2 delta2(delta, delta);\n    __m128i a1 = v_srai_epi64(_mm_add_epi64(a.val, delta2.val), n);\n    __m128i b1 = v_srai_epi64(_mm_add_epi64(b.val, delta2.val), n);\n    __m128i v0 = _mm_unpacklo_epi32(a1, b1); // a0 a1 0 0\n    __m128i v1 = _mm_unpackhi_epi32(a1, b1); // b0 b1 0 0\n    return v_int32x4(_mm_unpacklo_epi32(v0, v1));\n}\n\ntemplate<int n> inline\nvoid v_rshr_pack_store(int* ptr, const v_int64x2& a)\n{\n    int64 delta = (int64)1 << (n-1);\n    v_int64x2 delta2(delta, delta);\n    __m128i a1 = v_srai_epi64(_mm_add_epi64(a.val, delta2.val), n);\n    __m128i a2 = _mm_shuffle_epi32(a1, _MM_SHUFFLE(0, 2, 2, 0));\n    _mm_storel_epi64((__m128i*)ptr, a2);\n}\n\ninline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,\n                            const v_float32x4& m1, const v_float32x4& m2,\n                            const v_float32x4& m3)\n{\n    __m128 v0 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(0, 0, 0, 0)), m0.val);\n    __m128 v1 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(1, 1, 1, 1)), m1.val);\n    __m128 v2 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(2, 2, 2, 2)), m2.val);\n    __m128 v3 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(3, 3, 3, 3)), m3.val);\n\n    return v_float32x4(_mm_add_ps(_mm_add_ps(v0, v1), _mm_add_ps(v2, v3)));\n}\n\n\n#define OPENCV_HAL_IMPL_SSE_BIN_OP(bin_op, _Tpvec, intrin) \\\n    inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \\\n    { \\\n        return _Tpvec(intrin(a.val, b.val)); \\\n    } \\\n    inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \\\n    { \\\n        a.val = intrin(a.val, b.val); \\\n        return a; \\\n    }\n\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint8x16, _mm_adds_epu8)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint8x16, _mm_subs_epu8)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int8x16, _mm_adds_epi8)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int8x16, _mm_subs_epi8)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint16x8, _mm_adds_epu16)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint16x8, _mm_subs_epu16)\nOPENCV_HAL_IMPL_SSE_BIN_OP(*, v_uint16x8, _mm_mullo_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int16x8, _mm_adds_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int16x8, _mm_subs_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_OP(*, v_int16x8, _mm_mullo_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint32x4, _mm_add_epi32)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint32x4, _mm_sub_epi32)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int32x4, _mm_add_epi32)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int32x4, _mm_sub_epi32)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_float32x4, _mm_add_ps)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_float32x4, _mm_sub_ps)\nOPENCV_HAL_IMPL_SSE_BIN_OP(*, v_float32x4, _mm_mul_ps)\nOPENCV_HAL_IMPL_SSE_BIN_OP(/, v_float32x4, _mm_div_ps)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_float64x2, _mm_add_pd)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_float64x2, _mm_sub_pd)\nOPENCV_HAL_IMPL_SSE_BIN_OP(*, v_float64x2, _mm_mul_pd)\nOPENCV_HAL_IMPL_SSE_BIN_OP(/, v_float64x2, _mm_div_pd)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint64x2, _mm_add_epi64)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint64x2, _mm_sub_epi64)\nOPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int64x2, _mm_add_epi64)\nOPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int64x2, _mm_sub_epi64)\n\ninline v_uint32x4 operator * (const v_uint32x4& a, const v_uint32x4& b)\n{\n    __m128i c0 = _mm_mul_epu32(a.val, b.val);\n    __m128i c1 = 
_mm_mul_epu32(_mm_srli_epi64(a.val, 32), _mm_srli_epi64(b.val, 32));\n    __m128i d0 = _mm_unpacklo_epi32(c0, c1);\n    __m128i d1 = _mm_unpackhi_epi32(c0, c1);\n    return v_uint32x4(_mm_unpacklo_epi64(d0, d1));\n}\ninline v_int32x4 operator * (const v_int32x4& a, const v_int32x4& b)\n{\n    __m128i c0 = _mm_mul_epu32(a.val, b.val);\n    __m128i c1 = _mm_mul_epu32(_mm_srli_epi64(a.val, 32), _mm_srli_epi64(b.val, 32));\n    __m128i d0 = _mm_unpacklo_epi32(c0, c1);\n    __m128i d1 = _mm_unpackhi_epi32(c0, c1);\n    return v_int32x4(_mm_unpacklo_epi64(d0, d1));\n}\ninline v_uint32x4& operator *= (v_uint32x4& a, const v_uint32x4& b)\n{\n    a = a * b;\n    return a;\n}\ninline v_int32x4& operator *= (v_int32x4& a, const v_int32x4& b)\n{\n    a = a * b;\n    return a;\n}\n\ninline void v_mul_expand(const v_int16x8& a, const v_int16x8& b,\n                         v_int32x4& c, v_int32x4& d)\n{\n    __m128i v0 = _mm_mullo_epi16(a.val, b.val);\n    __m128i v1 = _mm_mulhi_epi16(a.val, b.val);\n    c.val = _mm_unpacklo_epi16(v0, v1);\n    d.val = _mm_unpackhi_epi16(v0, v1);\n}\n\ninline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b,\n                         v_uint32x4& c, v_uint32x4& d)\n{\n    __m128i v0 = _mm_mullo_epi16(a.val, b.val);\n    __m128i v1 = _mm_mulhi_epu16(a.val, b.val);\n    c.val = _mm_unpacklo_epi16(v0, v1);\n    d.val = _mm_unpackhi_epi16(v0, v1);\n}\n\ninline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b,\n                         v_uint64x2& c, v_uint64x2& d)\n{\n    __m128i c0 = _mm_mul_epu32(a.val, b.val);\n    __m128i c1 = _mm_mul_epu32(_mm_srli_epi64(a.val, 32), _mm_srli_epi64(b.val, 32));\n    c.val = _mm_unpacklo_epi64(c0, c1);\n    d.val = _mm_unpackhi_epi64(c0, c1);\n}\n\ninline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b)\n{\n    return v_int32x4(_mm_madd_epi16(a.val, b.val));\n}\n\n#define OPENCV_HAL_IMPL_SSE_LOGIC_OP(_Tpvec, suffix, not_const) \\\n    OPENCV_HAL_IMPL_SSE_BIN_OP(&, _Tpvec, _mm_and_##suffix) \\\n    OPENCV_HAL_IMPL_SSE_BIN_OP(|, _Tpvec, _mm_or_##suffix) \\\n    OPENCV_HAL_IMPL_SSE_BIN_OP(^, _Tpvec, _mm_xor_##suffix) \\\n    inline _Tpvec operator ~ (const _Tpvec& a) \\\n    { \\\n        return _Tpvec(_mm_xor_##suffix(a.val, not_const)); \\\n    }\n\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint8x16, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int8x16, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint16x8, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int16x8, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint32x4, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int32x4, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint64x2, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int64x2, si128, _mm_set1_epi32(-1))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_float32x4, ps, _mm_castsi128_ps(_mm_set1_epi32(-1)))\nOPENCV_HAL_IMPL_SSE_LOGIC_OP(v_float64x2, pd, _mm_castsi128_pd(_mm_set1_epi32(-1)))\n\ninline v_float32x4 v_sqrt(const v_float32x4& x)\n{ return v_float32x4(_mm_sqrt_ps(x.val)); }\n\ninline v_float32x4 v_invsqrt(const v_float32x4& x)\n{\n    static const __m128 _0_5 = _mm_set1_ps(0.5f), _1_5 = _mm_set1_ps(1.5f);\n    __m128 t = x.val;\n    __m128 h = _mm_mul_ps(t, _0_5);\n    t = _mm_rsqrt_ps(t);\n    t = _mm_mul_ps(t, _mm_sub_ps(_1_5, _mm_mul_ps(_mm_mul_ps(t, t), h)));\n    return v_float32x4(t);\n}\n\ninline v_float64x2 v_sqrt(const v_float64x2& x)\n{ return v_float64x2(_mm_sqrt_pd(x.val)); }\n\ninline v_float64x2 v_invsqrt(const 
v_float64x2& x)\n{\n    static const __m128d v_1 = _mm_set1_pd(1.);\n    return v_float64x2(_mm_div_pd(v_1, _mm_sqrt_pd(x.val)));\n}\n\ninline v_float32x4 v_abs(const v_float32x4& x)\n{ return v_float32x4(_mm_and_ps(x.val, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)))); }\ninline v_float64x2 v_abs(const v_float64x2& x)\n{\n    return v_float64x2(_mm_and_pd(x.val,\n        _mm_castsi128_pd(_mm_srli_epi64(_mm_set1_epi32(-1), 1))));\n}\n\n// TODO: exp, log, sin, cos\n\n#define OPENCV_HAL_IMPL_SSE_BIN_FUNC(_Tpvec, func, intrin) \\\ninline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    return _Tpvec(intrin(a.val, b.val)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_min, _mm_min_epu8)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_max, _mm_max_epu8)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_min, _mm_min_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_max, _mm_max_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float32x4, v_min, _mm_min_ps)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float32x4, v_max, _mm_max_ps)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float64x2, v_min, _mm_min_pd)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float64x2, v_max, _mm_max_pd)\n\ninline v_int8x16 v_min(const v_int8x16& a, const v_int8x16& b)\n{\n    __m128i delta = _mm_set1_epi8((char)-128);\n    return v_int8x16(_mm_xor_si128(delta, _mm_min_epu8(_mm_xor_si128(a.val, delta),\n                                                       _mm_xor_si128(b.val, delta))));\n}\ninline v_int8x16 v_max(const v_int8x16& a, const v_int8x16& b)\n{\n    __m128i delta = _mm_set1_epi8((char)-128);\n    return v_int8x16(_mm_xor_si128(delta, _mm_max_epu8(_mm_xor_si128(a.val, delta),\n                                                       _mm_xor_si128(b.val, delta))));\n}\ninline v_uint16x8 v_min(const v_uint16x8& a, const v_uint16x8& b)\n{\n    return v_uint16x8(_mm_subs_epu16(a.val, _mm_subs_epu16(a.val, b.val)));\n}\ninline v_uint16x8 v_max(const v_uint16x8& a, const v_uint16x8& b)\n{\n    return v_uint16x8(_mm_adds_epu16(_mm_subs_epu16(a.val, b.val), b.val));\n}\ninline v_uint32x4 v_min(const v_uint32x4& a, const v_uint32x4& b)\n{\n    __m128i delta = _mm_set1_epi32((int)0x80000000);\n    __m128i mask = _mm_cmpgt_epi32(_mm_xor_si128(a.val, delta), _mm_xor_si128(b.val, delta));\n    return v_uint32x4(v_select_si128(mask, b.val, a.val));\n}\ninline v_uint32x4 v_max(const v_uint32x4& a, const v_uint32x4& b)\n{\n    __m128i delta = _mm_set1_epi32((int)0x80000000);\n    __m128i mask = _mm_cmpgt_epi32(_mm_xor_si128(a.val, delta), _mm_xor_si128(b.val, delta));\n    return v_uint32x4(v_select_si128(mask, a.val, b.val));\n}\ninline v_int32x4 v_min(const v_int32x4& a, const v_int32x4& b)\n{\n    return v_int32x4(v_select_si128(_mm_cmpgt_epi32(a.val, b.val), b.val, a.val));\n}\ninline v_int32x4 v_max(const v_int32x4& a, const v_int32x4& b)\n{\n    return v_int32x4(v_select_si128(_mm_cmpgt_epi32(a.val, b.val), a.val, b.val));\n}\n\n#define OPENCV_HAL_IMPL_SSE_INT_CMP_OP(_Tpuvec, _Tpsvec, suffix, sbit) \\\ninline _Tpuvec operator == (const _Tpuvec& a, const _Tpuvec& b) \\\n{ return _Tpuvec(_mm_cmpeq_##suffix(a.val, b.val)); } \\\ninline _Tpuvec operator != (const _Tpuvec& a, const _Tpuvec& b) \\\n{ \\\n    __m128i not_mask = _mm_set1_epi32(-1); \\\n    return _Tpuvec(_mm_xor_si128(_mm_cmpeq_##suffix(a.val, b.val), not_mask)); \\\n} \\\ninline _Tpsvec operator == (const _Tpsvec& a, const _Tpsvec& b) \\\n{ return _Tpsvec(_mm_cmpeq_##suffix(a.val, b.val)); } \\\ninline _Tpsvec operator != (const _Tpsvec& a, const _Tpsvec& b) \\\n{ \\\n    __m128i not_mask = 
_mm_set1_epi32(-1); \\\n    return _Tpsvec(_mm_xor_si128(_mm_cmpeq_##suffix(a.val, b.val), not_mask)); \\\n} \\\ninline _Tpuvec operator < (const _Tpuvec& a, const _Tpuvec& b) \\\n{ \\\n    __m128i smask = _mm_set1_##suffix(sbit); \\\n    return _Tpuvec(_mm_cmpgt_##suffix(_mm_xor_si128(b.val, smask), _mm_xor_si128(a.val, smask))); \\\n} \\\ninline _Tpuvec operator > (const _Tpuvec& a, const _Tpuvec& b) \\\n{ \\\n    __m128i smask = _mm_set1_##suffix(sbit); \\\n    return _Tpuvec(_mm_cmpgt_##suffix(_mm_xor_si128(a.val, smask), _mm_xor_si128(b.val, smask))); \\\n} \\\ninline _Tpuvec operator <= (const _Tpuvec& a, const _Tpuvec& b) \\\n{ \\\n    __m128i smask = _mm_set1_##suffix(sbit); \\\n    __m128i not_mask = _mm_set1_epi32(-1); \\\n    __m128i res = _mm_cmpgt_##suffix(_mm_xor_si128(a.val, smask), _mm_xor_si128(b.val, smask)); \\\n    return _Tpuvec(_mm_xor_si128(res, not_mask)); \\\n} \\\ninline _Tpuvec operator >= (const _Tpuvec& a, const _Tpuvec& b) \\\n{ \\\n    __m128i smask = _mm_set1_##suffix(sbit); \\\n    __m128i not_mask = _mm_set1_epi32(-1); \\\n    __m128i res = _mm_cmpgt_##suffix(_mm_xor_si128(b.val, smask), _mm_xor_si128(a.val, smask)); \\\n    return _Tpuvec(_mm_xor_si128(res, not_mask)); \\\n} \\\ninline _Tpsvec operator < (const _Tpsvec& a, const _Tpsvec& b) \\\n{ \\\n    return _Tpsvec(_mm_cmpgt_##suffix(b.val, a.val)); \\\n} \\\ninline _Tpsvec operator > (const _Tpsvec& a, const _Tpsvec& b) \\\n{ \\\n    return _Tpsvec(_mm_cmpgt_##suffix(a.val, b.val)); \\\n} \\\ninline _Tpsvec operator <= (const _Tpsvec& a, const _Tpsvec& b) \\\n{ \\\n    __m128i not_mask = _mm_set1_epi32(-1); \\\n    return _Tpsvec(_mm_xor_si128(_mm_cmpgt_##suffix(a.val, b.val), not_mask)); \\\n} \\\ninline _Tpsvec operator >= (const _Tpsvec& a, const _Tpsvec& b) \\\n{ \\\n    __m128i not_mask = _mm_set1_epi32(-1); \\\n    return _Tpsvec(_mm_xor_si128(_mm_cmpgt_##suffix(b.val, a.val), not_mask)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_INT_CMP_OP(v_uint8x16, v_int8x16, epi8, (char)-128)\nOPENCV_HAL_IMPL_SSE_INT_CMP_OP(v_uint16x8, v_int16x8, epi16, (short)-32768)\nOPENCV_HAL_IMPL_SSE_INT_CMP_OP(v_uint32x4, v_int32x4, epi32, (int)0x80000000)\n\n#define OPENCV_HAL_IMPL_SSE_FLT_CMP_OP(_Tpvec, suffix) \\\ninline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(_mm_cmpeq_##suffix(a.val, b.val)); } \\\ninline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(_mm_cmpneq_##suffix(a.val, b.val)); } \\\ninline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(_mm_cmplt_##suffix(a.val, b.val)); } \\\ninline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(_mm_cmpgt_##suffix(a.val, b.val)); } \\\ninline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(_mm_cmple_##suffix(a.val, b.val)); } \\\ninline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \\\n{ return _Tpvec(_mm_cmpge_##suffix(a.val, b.val)); }\n\nOPENCV_HAL_IMPL_SSE_FLT_CMP_OP(v_float32x4, ps)\nOPENCV_HAL_IMPL_SSE_FLT_CMP_OP(v_float64x2, pd)\n\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_add_wrap, _mm_add_epi8)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int8x16, v_add_wrap, _mm_add_epi8)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint16x8, v_add_wrap, _mm_add_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_add_wrap, _mm_add_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_sub_wrap, _mm_sub_epi8)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int8x16, v_sub_wrap, _mm_sub_epi8)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint16x8, v_sub_wrap, 
_mm_sub_epi16)\nOPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_sub_wrap, _mm_sub_epi16)\n\n#define OPENCV_HAL_IMPL_SSE_ABSDIFF_8_16(_Tpuvec, _Tpsvec, bits, smask32) \\\ninline _Tpuvec v_absdiff(const _Tpuvec& a, const _Tpuvec& b) \\\n{ \\\n    return _Tpuvec(_mm_add_epi##bits(_mm_subs_epu##bits(a.val, b.val), _mm_subs_epu##bits(b.val, a.val))); \\\n} \\\ninline _Tpuvec v_absdiff(const _Tpsvec& a, const _Tpsvec& b) \\\n{ \\\n    __m128i smask = _mm_set1_epi32(smask32); \\\n    __m128i a1 = _mm_xor_si128(a.val, smask); \\\n    __m128i b1 = _mm_xor_si128(b.val, smask); \\\n    return _Tpuvec(_mm_add_epi##bits(_mm_subs_epu##bits(a1, b1), _mm_subs_epu##bits(b1, a1))); \\\n}\n\nOPENCV_HAL_IMPL_SSE_ABSDIFF_8_16(v_uint8x16, v_int8x16, 8, (int)0x80808080)\nOPENCV_HAL_IMPL_SSE_ABSDIFF_8_16(v_uint16x8, v_int16x8, 16, (int)0x80008000)\n\ninline v_uint32x4 v_absdiff(const v_uint32x4& a, const v_uint32x4& b)\n{\n    return v_max(a, b) - v_min(a, b);\n}\n\ninline v_uint32x4 v_absdiff(const v_int32x4& a, const v_int32x4& b)\n{\n    __m128i d = _mm_sub_epi32(a.val, b.val);\n    __m128i m = _mm_cmpgt_epi32(b.val, a.val);\n    return v_uint32x4(_mm_sub_epi32(_mm_xor_si128(d, m), m));\n}\n\n#define OPENCV_HAL_IMPL_SSE_MISC_FLT_OP(_Tpvec, _Tp, _Tpreg, suffix, absmask_vec) \\\ninline _Tpvec v_absdiff(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    _Tpreg absmask = _mm_castsi128_##suffix(absmask_vec); \\\n    return _Tpvec(_mm_and_##suffix(_mm_sub_##suffix(a.val, b.val), absmask)); \\\n} \\\ninline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    _Tpreg res = _mm_add_##suffix(_mm_mul_##suffix(a.val, a.val), _mm_mul_##suffix(b.val, b.val)); \\\n    return _Tpvec(_mm_sqrt_##suffix(res)); \\\n} \\\ninline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    _Tpreg res = _mm_add_##suffix(_mm_mul_##suffix(a.val, a.val), _mm_mul_##suffix(b.val, b.val)); \\\n    return _Tpvec(res); \\\n} \\\ninline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \\\n{ \\\n    return _Tpvec(_mm_add_##suffix(_mm_mul_##suffix(a.val, b.val), c.val)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_MISC_FLT_OP(v_float32x4, float, __m128, ps, _mm_set1_epi32((int)0x7fffffff))\nOPENCV_HAL_IMPL_SSE_MISC_FLT_OP(v_float64x2, double, __m128d, pd, _mm_srli_epi64(_mm_set1_epi32(-1), 1))\n\n#define OPENCV_HAL_IMPL_SSE_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, srai) \\\ninline _Tpuvec operator << (const _Tpuvec& a, int imm) \\\n{ \\\n    return _Tpuvec(_mm_slli_##suffix(a.val, imm)); \\\n} \\\ninline _Tpsvec operator << (const _Tpsvec& a, int imm) \\\n{ \\\n    return _Tpsvec(_mm_slli_##suffix(a.val, imm)); \\\n} \\\ninline _Tpuvec operator >> (const _Tpuvec& a, int imm) \\\n{ \\\n    return _Tpuvec(_mm_srli_##suffix(a.val, imm)); \\\n} \\\ninline _Tpsvec operator >> (const _Tpsvec& a, int imm) \\\n{ \\\n    return _Tpsvec(srai(a.val, imm)); \\\n} \\\ntemplate<int imm> \\\ninline _Tpuvec v_shl(const _Tpuvec& a) \\\n{ \\\n    return _Tpuvec(_mm_slli_##suffix(a.val, imm)); \\\n} \\\ntemplate<int imm> \\\ninline _Tpsvec v_shl(const _Tpsvec& a) \\\n{ \\\n    return _Tpsvec(_mm_slli_##suffix(a.val, imm)); \\\n} \\\ntemplate<int imm> \\\ninline _Tpuvec v_shr(const _Tpuvec& a) \\\n{ \\\n    return _Tpuvec(_mm_srli_##suffix(a.val, imm)); \\\n} \\\ntemplate<int imm> \\\ninline _Tpsvec v_shr(const _Tpsvec& a) \\\n{ \\\n    return _Tpsvec(srai(a.val, imm)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint16x8, v_int16x8, epi16, _mm_srai_epi16)\nOPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint32x4, v_int32x4, epi32, 
_mm_srai_epi32)\nOPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint64x2, v_int64x2, epi64, v_srai_epi64)\n\n#define OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(_Tpvec, _Tp) \\\ninline _Tpvec v_load(const _Tp* ptr) \\\n{ return _Tpvec(_mm_loadu_si128((const __m128i*)ptr)); } \\\ninline _Tpvec v_load_aligned(const _Tp* ptr) \\\n{ return _Tpvec(_mm_load_si128((const __m128i*)ptr)); } \\\ninline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \\\n{ \\\n    return _Tpvec(_mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)ptr0), \\\n                                     _mm_loadl_epi64((const __m128i*)ptr1))); \\\n} \\\ninline void v_store(_Tp* ptr, const _Tpvec& a) \\\n{ _mm_storeu_si128((__m128i*)ptr, a.val); } \\\ninline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \\\n{ _mm_store_si128((__m128i*)ptr, a.val); } \\\ninline void v_store_low(_Tp* ptr, const _Tpvec& a) \\\n{ _mm_storel_epi64((__m128i*)ptr, a.val); } \\\ninline void v_store_high(_Tp* ptr, const _Tpvec& a) \\\n{ _mm_storel_epi64((__m128i*)ptr, _mm_unpackhi_epi64(a.val, a.val)); }\n\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint8x16, uchar)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int8x16, schar)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint16x8, ushort)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int16x8, short)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint32x4, unsigned)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int32x4, int)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint64x2, uint64)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int64x2, int64)\n\n#define OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(_Tpvec, _Tp, suffix) \\\ninline _Tpvec v_load(const _Tp* ptr) \\\n{ return _Tpvec(_mm_loadu_##suffix(ptr)); } \\\ninline _Tpvec v_load_aligned(const _Tp* ptr) \\\n{ return _Tpvec(_mm_load_##suffix(ptr)); } \\\ninline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \\\n{ \\\n    return _Tpvec(_mm_castsi128_##suffix( \\\n        _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)ptr0), \\\n                           _mm_loadl_epi64((const __m128i*)ptr1)))); \\\n} \\\ninline void v_store(_Tp* ptr, const _Tpvec& a) \\\n{ _mm_storeu_##suffix(ptr, a.val); } \\\ninline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \\\n{ _mm_store_##suffix(ptr, a.val); } \\\ninline void v_store_low(_Tp* ptr, const _Tpvec& a) \\\n{ _mm_storel_epi64((__m128i*)ptr, _mm_cast##suffix##_si128(a.val)); } \\\ninline void v_store_high(_Tp* ptr, const _Tpvec& a) \\\n{ \\\n    __m128i a1 = _mm_cast##suffix##_si128(a.val); \\\n    _mm_storel_epi64((__m128i*)ptr, _mm_unpackhi_epi64(a1, a1)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float32x4, float, ps)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float64x2, double, pd)\n\n#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(_Tpvec, scalartype, func, scalar_func) \\\ninline scalartype v_reduce_##func(const _Tpvec& a) \\\n{ \\\n    scalartype CV_DECL_ALIGNED(16) buf[4]; \\\n    v_store_aligned(buf, a); \\\n    scalartype s0 = scalar_func(buf[0], buf[1]); \\\n    scalartype s1 = scalar_func(buf[2], buf[3]); \\\n    return scalar_func(s0, s1); \\\n}\n\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, sum, OPENCV_HAL_ADD)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, max, std::max)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, min, std::min)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, sum, OPENCV_HAL_ADD)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, max, std::max)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, min, std::min)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, sum, 
OPENCV_HAL_ADD)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, max, std::max)\nOPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, min, std::min)\n\n#define OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(_Tpvec, suffix, pack_op, and_op, signmask, allmask) \\\ninline int v_signmask(const _Tpvec& a) \\\n{ \\\n    return and_op(_mm_movemask_##suffix(pack_op(a.val)), signmask); \\\n} \\\ninline bool v_check_all(const _Tpvec& a) \\\n{ return and_op(_mm_movemask_##suffix(a.val), allmask) == allmask; } \\\ninline bool v_check_any(const _Tpvec& a) \\\n{ return and_op(_mm_movemask_##suffix(a.val), allmask) != 0; }\n\n#define OPENCV_HAL_PACKS(a) _mm_packs_epi16(a, a)\ninline __m128i v_packq_epi32(__m128i a)\n{\n    __m128i b = _mm_packs_epi32(a, a);\n    return _mm_packs_epi16(b, b);\n}\n\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 65535, 65535)\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 65535, 65535)\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint16x8, epi8, OPENCV_HAL_PACKS, OPENCV_HAL_AND, 255, (int)0xaaaa)\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int16x8, epi8, OPENCV_HAL_PACKS, OPENCV_HAL_AND, 255, (int)0xaaaa)\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint32x4, epi8, v_packq_epi32, OPENCV_HAL_AND, 15, (int)0x8888)\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int32x4, epi8, v_packq_epi32, OPENCV_HAL_AND, 15, (int)0x8888)\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float32x4, ps, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 15, 15)\nOPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float64x2, pd, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 3, 3)\n\n#define OPENCV_HAL_IMPL_SSE_SELECT(_Tpvec, suffix) \\\ninline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    return _Tpvec(_mm_xor_##suffix(b.val, _mm_and_##suffix(_mm_xor_##suffix(b.val, a.val), mask.val))); \\\n}\n\nOPENCV_HAL_IMPL_SSE_SELECT(v_uint8x16, si128)\nOPENCV_HAL_IMPL_SSE_SELECT(v_int8x16, si128)\nOPENCV_HAL_IMPL_SSE_SELECT(v_uint16x8, si128)\nOPENCV_HAL_IMPL_SSE_SELECT(v_int16x8, si128)\nOPENCV_HAL_IMPL_SSE_SELECT(v_uint32x4, si128)\nOPENCV_HAL_IMPL_SSE_SELECT(v_int32x4, si128)\n// OPENCV_HAL_IMPL_SSE_SELECT(v_uint64x2, si128)\n// OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, si128)\nOPENCV_HAL_IMPL_SSE_SELECT(v_float32x4, ps)\nOPENCV_HAL_IMPL_SSE_SELECT(v_float64x2, pd)\n\n#define OPENCV_HAL_IMPL_SSE_EXPAND(_Tpuvec, _Tpwuvec, _Tpu, _Tpsvec, _Tpwsvec, _Tps, suffix, wsuffix, shift) \\\ninline void v_expand(const _Tpuvec& a, _Tpwuvec& b0, _Tpwuvec& b1) \\\n{ \\\n    __m128i z = _mm_setzero_si128(); \\\n    b0.val = _mm_unpacklo_##suffix(a.val, z); \\\n    b1.val = _mm_unpackhi_##suffix(a.val, z); \\\n} \\\ninline _Tpwuvec v_load_expand(const _Tpu* ptr) \\\n{ \\\n    __m128i z = _mm_setzero_si128(); \\\n    return _Tpwuvec(_mm_unpacklo_##suffix(_mm_loadl_epi64((const __m128i*)ptr), z)); \\\n} \\\ninline void v_expand(const _Tpsvec& a, _Tpwsvec& b0, _Tpwsvec& b1) \\\n{ \\\n    b0.val = _mm_srai_##wsuffix(_mm_unpacklo_##suffix(a.val, a.val), shift); \\\n    b1.val = _mm_srai_##wsuffix(_mm_unpackhi_##suffix(a.val, a.val), shift); \\\n} \\\ninline _Tpwsvec v_load_expand(const _Tps* ptr) \\\n{ \\\n    __m128i a = _mm_loadl_epi64((const __m128i*)ptr); \\\n    return _Tpwsvec(_mm_srai_##wsuffix(_mm_unpacklo_##suffix(a, a), shift)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_EXPAND(v_uint8x16, v_uint16x8, uchar, v_int8x16, v_int16x8, schar, epi8, epi16, 8)\nOPENCV_HAL_IMPL_SSE_EXPAND(v_uint16x8, v_uint32x4, ushort, v_int16x8, v_int32x4, short, epi16, epi32, 16)\n\ninline void v_expand(const v_uint32x4& a, v_uint64x2& b0, v_uint64x2& b1)\n{\n    
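// zero-extend: interleaving each 32-bit lane of a with a zero lane widens it to an\n    // unsigned 64-bit lane (the low two lanes go to b0, the high two to b1)\n    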
__m128i z = _mm_setzero_si128();\n    b0.val = _mm_unpacklo_epi32(a.val, z);\n    b1.val = _mm_unpackhi_epi32(a.val, z);\n}\ninline v_uint64x2 v_load_expand(const unsigned* ptr)\n{\n    __m128i z = _mm_setzero_si128();\n    return v_uint64x2(_mm_unpacklo_epi32(_mm_loadl_epi64((const __m128i*)ptr), z));\n}\ninline void v_expand(const v_int32x4& a, v_int64x2& b0, v_int64x2& b1)\n{\n    __m128i s = _mm_srai_epi32(a.val, 31);\n    b0.val = _mm_unpacklo_epi32(a.val, s);\n    b1.val = _mm_unpackhi_epi32(a.val, s);\n}\ninline v_int64x2 v_load_expand(const int* ptr)\n{\n    __m128i a = _mm_loadl_epi64((const __m128i*)ptr);\n    __m128i s = _mm_srai_epi32(a, 31);\n    return v_int64x2(_mm_unpacklo_epi32(a, s));\n}\n\ninline v_uint32x4 v_load_expand_q(const uchar* ptr)\n{\n    __m128i z = _mm_setzero_si128();\n    __m128i a = _mm_cvtsi32_si128(*(const int*)ptr);\n    return v_uint32x4(_mm_unpacklo_epi16(_mm_unpacklo_epi8(a, z), z));\n}\n\ninline v_int32x4 v_load_expand_q(const schar* ptr)\n{\n    __m128i a = _mm_cvtsi32_si128(*(const int*)ptr);\n    a = _mm_unpacklo_epi8(a, a);\n    a = _mm_unpacklo_epi8(a, a);\n    return v_int32x4(_mm_srai_epi32(a, 24));\n}\n\n#define OPENCV_HAL_IMPL_SSE_UNPACKS(_Tpvec, suffix, cast_from, cast_to) \\\ninline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1) \\\n{ \\\n    b0.val = _mm_unpacklo_##suffix(a0.val, a1.val); \\\n    b1.val = _mm_unpackhi_##suffix(a0.val, a1.val); \\\n} \\\ninline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    __m128i a1 = cast_from(a.val), b1 = cast_from(b.val); \\\n    return _Tpvec(cast_to(_mm_unpacklo_epi64(a1, b1))); \\\n} \\\ninline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \\\n{ \\\n    __m128i a1 = cast_from(a.val), b1 = cast_from(b.val); \\\n    return _Tpvec(cast_to(_mm_unpackhi_epi64(a1, b1))); \\\n} \\\ninline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d) \\\n{ \\\n    __m128i a1 = cast_from(a.val), b1 = cast_from(b.val); \\\n    c.val = cast_to(_mm_unpacklo_epi64(a1, b1)); \\\n    d.val = cast_to(_mm_unpackhi_epi64(a1, b1)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_uint8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_int8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_uint16x8, epi16, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_int16x8, epi16, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_uint32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_int32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_float32x4, ps, _mm_castps_si128, _mm_castsi128_ps)\nOPENCV_HAL_IMPL_SSE_UNPACKS(v_float64x2, pd, _mm_castpd_si128, _mm_castsi128_pd)\n\ntemplate<int s, typename _Tpvec>\ninline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b)\n{\n    const int w = sizeof(typename _Tpvec::lane_type);\n    const int n = _Tpvec::nlanes;\n    __m128i ra, rb;\n    ra = _mm_srli_si128(a.val, s*w);\n    rb = _mm_slli_si128(b.val, (n-s)*w);\n    return _Tpvec(_mm_or_si128(ra, rb));\n}\n\ninline v_int32x4 v_round(const v_float32x4& a)\n{ return v_int32x4(_mm_cvtps_epi32(a.val)); }\n\ninline v_int32x4 v_floor(const v_float32x4& a)\n{\n    __m128i a1 = _mm_cvtps_epi32(a.val);\n    __m128i mask = _mm_castps_si128(_mm_cmpgt_ps(_mm_cvtepi32_ps(a1), a.val));\n    return v_int32x4(_mm_add_epi32(a1, mask));\n}\n\ninline v_int32x4 v_ceil(const v_float32x4& a)\n{\n    __m128i a1 = _mm_cvtps_epi32(a.val);\n    __m128i mask = 
_mm_castps_si128(_mm_cmpgt_ps(a.val, _mm_cvtepi32_ps(a1)));\n    return v_int32x4(_mm_sub_epi32(a1, mask));\n}\n\ninline v_int32x4 v_trunc(const v_float32x4& a)\n{ return v_int32x4(_mm_cvttps_epi32(a.val)); }\n\ninline v_int32x4 v_round(const v_float64x2& a)\n{ return v_int32x4(_mm_cvtpd_epi32(a.val)); }\n\ninline v_int32x4 v_floor(const v_float64x2& a)\n{\n    __m128i a1 = _mm_cvtpd_epi32(a.val);\n    __m128i mask = _mm_castpd_si128(_mm_cmpgt_pd(_mm_cvtepi32_pd(a1), a.val));\n    mask = _mm_srli_si128(_mm_slli_si128(mask, 4), 8); // m0 m0 m1 m1 => m0 m1 0 0\n    return v_int32x4(_mm_add_epi32(a1, mask));\n}\n\ninline v_int32x4 v_ceil(const v_float64x2& a)\n{\n    __m128i a1 = _mm_cvtpd_epi32(a.val);\n    __m128i mask = _mm_castpd_si128(_mm_cmpgt_pd(a.val, _mm_cvtepi32_pd(a1)));\n    mask = _mm_srli_si128(_mm_slli_si128(mask, 4), 8); // m0 m0 m1 m1 => m0 m1 0 0\n    return v_int32x4(_mm_sub_epi32(a1, mask));\n}\n\ninline v_int32x4 v_trunc(const v_float64x2& a)\n{ return v_int32x4(_mm_cvttpd_epi32(a.val)); }\n\n#define OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(_Tpvec, suffix, cast_from, cast_to) \\\ninline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \\\n                           const _Tpvec& a2, const _Tpvec& a3, \\\n                           _Tpvec& b0, _Tpvec& b1, \\\n                           _Tpvec& b2, _Tpvec& b3) \\\n{ \\\n    __m128i t0 = cast_from(_mm_unpacklo_##suffix(a0.val, a1.val)); \\\n    __m128i t1 = cast_from(_mm_unpacklo_##suffix(a2.val, a3.val)); \\\n    __m128i t2 = cast_from(_mm_unpackhi_##suffix(a0.val, a1.val)); \\\n    __m128i t3 = cast_from(_mm_unpackhi_##suffix(a2.val, a3.val)); \\\n\\\n    b0.val = cast_to(_mm_unpacklo_epi64(t0, t1)); \\\n    b1.val = cast_to(_mm_unpackhi_epi64(t0, t1)); \\\n    b2.val = cast_to(_mm_unpacklo_epi64(t2, t3)); \\\n    b3.val = cast_to(_mm_unpackhi_epi64(t2, t3)); \\\n}\n\nOPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_uint32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_int32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP)\nOPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_float32x4, ps, _mm_castps_si128, _mm_castsi128_ps)\n\n// adopted from sse_utils.hpp\ninline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c)\n{\n    __m128i t00 = _mm_loadu_si128((const __m128i*)ptr);\n    __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 16));\n    __m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 32));\n\n    __m128i t10 = _mm_unpacklo_epi8(t00, _mm_unpackhi_epi64(t01, t01));\n    __m128i t11 = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t00, t00), t02);\n    __m128i t12 = _mm_unpacklo_epi8(t01, _mm_unpackhi_epi64(t02, t02));\n\n    __m128i t20 = _mm_unpacklo_epi8(t10, _mm_unpackhi_epi64(t11, t11));\n    __m128i t21 = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t10, t10), t12);\n    __m128i t22 = _mm_unpacklo_epi8(t11, _mm_unpackhi_epi64(t12, t12));\n\n    __m128i t30 = _mm_unpacklo_epi8(t20, _mm_unpackhi_epi64(t21, t21));\n    __m128i t31 = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t20, t20), t22);\n    __m128i t32 = _mm_unpacklo_epi8(t21, _mm_unpackhi_epi64(t22, t22));\n\n    a.val = _mm_unpacklo_epi8(t30, _mm_unpackhi_epi64(t31, t31));\n    b.val = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t30, t30), t32);\n    c.val = _mm_unpacklo_epi8(t31, _mm_unpackhi_epi64(t32, t32));\n}\n\ninline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c, v_uint8x16& d)\n{\n    __m128i u0 = _mm_loadu_si128((const __m128i*)ptr); // a0 b0 c0 d0 a1 b1 c1 d1 ...\n    __m128i u1 = _mm_loadu_si128((const 
__m128i*)(ptr + 16)); // a4 b4 c4 d4 ...\n    __m128i u2 = _mm_loadu_si128((const __m128i*)(ptr + 32)); // a8 b8 c8 d8 ...\n    __m128i u3 = _mm_loadu_si128((const __m128i*)(ptr + 48)); // a12 b12 c12 d12 ...\n\n    __m128i v0 = _mm_unpacklo_epi8(u0, u2); // a0 a8 b0 b8 ...\n    __m128i v1 = _mm_unpackhi_epi8(u0, u2); // a2 a10 b2 b10 ...\n    __m128i v2 = _mm_unpacklo_epi8(u1, u3); // a4 a12 b4 b12 ...\n    __m128i v3 = _mm_unpackhi_epi8(u1, u3); // a6 a14 b6 b14 ...\n\n    u0 = _mm_unpacklo_epi8(v0, v2); // a0 a4 a8 a12 ...\n    u1 = _mm_unpacklo_epi8(v1, v3); // a2 a6 a10 a14 ...\n    u2 = _mm_unpackhi_epi8(v0, v2); // a1 a5 a9 a13 ...\n    u3 = _mm_unpackhi_epi8(v1, v3); // a3 a7 a11 a15 ...\n\n    v0 = _mm_unpacklo_epi8(u0, u1); // a0 a2 a4 a6 ...\n    v1 = _mm_unpacklo_epi8(u2, u3); // a1 a3 a5 a7 ...\n    v2 = _mm_unpackhi_epi8(u0, u1); // c0 c2 c4 c6 ...\n    v3 = _mm_unpackhi_epi8(u2, u3); // c1 c3 c5 c7 ...\n\n    a.val = _mm_unpacklo_epi8(v0, v1);\n    b.val = _mm_unpackhi_epi8(v0, v1);\n    c.val = _mm_unpacklo_epi8(v2, v3);\n    d.val = _mm_unpackhi_epi8(v2, v3);\n}\n\ninline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c)\n{\n    __m128i t00 = _mm_loadu_si128((const __m128i*)ptr);\n    __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 8));\n    __m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 16));\n\n    __m128i t10 = _mm_unpacklo_epi16(t00, _mm_unpackhi_epi64(t01, t01));\n    __m128i t11 = _mm_unpacklo_epi16(_mm_unpackhi_epi64(t00, t00), t02);\n    __m128i t12 = _mm_unpacklo_epi16(t01, _mm_unpackhi_epi64(t02, t02));\n\n    __m128i t20 = _mm_unpacklo_epi16(t10, _mm_unpackhi_epi64(t11, t11));\n    __m128i t21 = _mm_unpacklo_epi16(_mm_unpackhi_epi64(t10, t10), t12);\n    __m128i t22 = _mm_unpacklo_epi16(t11, _mm_unpackhi_epi64(t12, t12));\n\n    a.val = _mm_unpacklo_epi16(t20, _mm_unpackhi_epi64(t21, t21));\n    b.val = _mm_unpacklo_epi16(_mm_unpackhi_epi64(t20, t20), t22);\n    c.val = _mm_unpacklo_epi16(t21, _mm_unpackhi_epi64(t22, t22));\n}\n\ninline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c, v_uint16x8& d)\n{\n    __m128i u0 = _mm_loadu_si128((const __m128i*)ptr); // a0 b0 c0 d0 a1 b1 c1 d1\n    __m128i u1 = _mm_loadu_si128((const __m128i*)(ptr + 8)); // a2 b2 c2 d2 ...\n    __m128i u2 = _mm_loadu_si128((const __m128i*)(ptr + 16)); // a4 b4 c4 d4 ...\n    __m128i u3 = _mm_loadu_si128((const __m128i*)(ptr + 24)); // a6 b6 c6 d6 ...\n\n    __m128i v0 = _mm_unpacklo_epi16(u0, u2); // a0 a4 b0 b4 ...\n    __m128i v1 = _mm_unpackhi_epi16(u0, u2); // a1 a5 b1 b5 ...\n    __m128i v2 = _mm_unpacklo_epi16(u1, u3); // a2 a6 b2 b6 ...\n    __m128i v3 = _mm_unpackhi_epi16(u1, u3); // a3 a7 b3 b7 ...\n\n    u0 = _mm_unpacklo_epi16(v0, v2); // a0 a2 a4 a6 ...\n    u1 = _mm_unpacklo_epi16(v1, v3); // a1 a3 a5 a7 ...\n    u2 = _mm_unpackhi_epi16(v0, v2); // c0 c2 c4 c6 ...\n    u3 = _mm_unpackhi_epi16(v1, v3); // c1 c3 c5 c7 ...\n\n    a.val = _mm_unpacklo_epi16(u0, u1);\n    b.val = _mm_unpackhi_epi16(u0, u1);\n    c.val = _mm_unpacklo_epi16(u2, u3);\n    d.val = _mm_unpackhi_epi16(u2, u3);\n}\n\ninline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c)\n{\n    __m128i t00 = _mm_loadu_si128((const __m128i*)ptr);\n    __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 4));\n    __m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 8));\n\n    __m128i t10 = _mm_unpacklo_epi32(t00, _mm_unpackhi_epi64(t01, t01));\n    __m128i t11 = 
_mm_unpacklo_epi32(_mm_unpackhi_epi64(t00, t00), t02);\n    __m128i t12 = _mm_unpacklo_epi32(t01, _mm_unpackhi_epi64(t02, t02));\n\n    a.val = _mm_unpacklo_epi32(t10, _mm_unpackhi_epi64(t11, t11));\n    b.val = _mm_unpacklo_epi32(_mm_unpackhi_epi64(t10, t10), t12);\n    c.val = _mm_unpacklo_epi32(t11, _mm_unpackhi_epi64(t12, t12));\n}\n\ninline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c, v_uint32x4& d)\n{\n    v_uint32x4 u0(_mm_loadu_si128((const __m128i*)ptr));        // a0 b0 c0 d0\n    v_uint32x4 u1(_mm_loadu_si128((const __m128i*)(ptr + 4))); // a1 b1 c1 d1\n    v_uint32x4 u2(_mm_loadu_si128((const __m128i*)(ptr + 8))); // a2 b2 c2 d2\n    v_uint32x4 u3(_mm_loadu_si128((const __m128i*)(ptr + 12))); // a3 b3 c3 d3\n\n    v_transpose4x4(u0, u1, u2, u3, a, b, c, d);\n}\n\ninline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,\n                                const v_uint8x16& c )\n{\n    __m128i z = _mm_setzero_si128();\n    __m128i ab0 = _mm_unpacklo_epi8(a.val, b.val);\n    __m128i ab1 = _mm_unpackhi_epi8(a.val, b.val);\n    __m128i c0 = _mm_unpacklo_epi8(c.val, z);\n    __m128i c1 = _mm_unpackhi_epi8(c.val, z);\n\n    __m128i p00 = _mm_unpacklo_epi16(ab0, c0);\n    __m128i p01 = _mm_unpackhi_epi16(ab0, c0);\n    __m128i p02 = _mm_unpacklo_epi16(ab1, c1);\n    __m128i p03 = _mm_unpackhi_epi16(ab1, c1);\n\n    __m128i p10 = _mm_unpacklo_epi32(p00, p01);\n    __m128i p11 = _mm_unpackhi_epi32(p00, p01);\n    __m128i p12 = _mm_unpacklo_epi32(p02, p03);\n    __m128i p13 = _mm_unpackhi_epi32(p02, p03);\n\n    __m128i p20 = _mm_unpacklo_epi64(p10, p11);\n    __m128i p21 = _mm_unpackhi_epi64(p10, p11);\n    __m128i p22 = _mm_unpacklo_epi64(p12, p13);\n    __m128i p23 = _mm_unpackhi_epi64(p12, p13);\n\n    p20 = _mm_slli_si128(p20, 1);\n    p22 = _mm_slli_si128(p22, 1);\n\n    __m128i p30 = _mm_slli_epi64(_mm_unpacklo_epi32(p20, p21), 8);\n    __m128i p31 = _mm_srli_epi64(_mm_unpackhi_epi32(p20, p21), 8);\n    __m128i p32 = _mm_slli_epi64(_mm_unpacklo_epi32(p22, p23), 8);\n    __m128i p33 = _mm_srli_epi64(_mm_unpackhi_epi32(p22, p23), 8);\n\n    __m128i p40 = _mm_unpacklo_epi64(p30, p31);\n    __m128i p41 = _mm_unpackhi_epi64(p30, p31);\n    __m128i p42 = _mm_unpacklo_epi64(p32, p33);\n    __m128i p43 = _mm_unpackhi_epi64(p32, p33);\n\n    __m128i v0 = _mm_or_si128(_mm_srli_si128(p40, 2), _mm_slli_si128(p41, 10));\n    __m128i v1 = _mm_or_si128(_mm_srli_si128(p41, 6), _mm_slli_si128(p42, 6));\n    __m128i v2 = _mm_or_si128(_mm_srli_si128(p42, 10), _mm_slli_si128(p43, 2));\n\n    _mm_storeu_si128((__m128i*)(ptr), v0);\n    _mm_storeu_si128((__m128i*)(ptr + 16), v1);\n    _mm_storeu_si128((__m128i*)(ptr + 32), v2);\n}\n\ninline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,\n                                const v_uint8x16& c, const v_uint8x16& d)\n{\n    // a0 a1 a2 a3 ....\n    // b0 b1 b2 b3 ....\n    // c0 c1 c2 c3 ....\n    // d0 d1 d2 d3 ....\n    __m128i u0 = _mm_unpacklo_epi8(a.val, c.val); // a0 c0 a1 c1 ...\n    __m128i u1 = _mm_unpackhi_epi8(a.val, c.val); // a8 c8 a9 c9 ...\n    __m128i u2 = _mm_unpacklo_epi8(b.val, d.val); // b0 d0 b1 d1 ...\n    __m128i u3 = _mm_unpackhi_epi8(b.val, d.val); // b8 d8 b9 d9 ...\n\n    __m128i v0 = _mm_unpacklo_epi8(u0, u2); // a0 b0 c0 d0 ...\n    __m128i v1 = _mm_unpacklo_epi8(u1, u3); // a8 b8 c8 d8 ...\n    __m128i v2 = _mm_unpackhi_epi8(u0, u2); // a4 b4 c4 d4 ...\n    __m128i v3 = _mm_unpackhi_epi8(u1, u3); // a12 b12 c12 d12 ...\n\n    
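// note the store order v0, v2, v1, v3: per the lane comments above, v2 holds pixels 4..7 and v1 holds pixels 8..11\n    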
_mm_storeu_si128((__m128i*)ptr, v0);\n    _mm_storeu_si128((__m128i*)(ptr + 16), v2);\n    _mm_storeu_si128((__m128i*)(ptr + 32), v1);\n    _mm_storeu_si128((__m128i*)(ptr + 48), v3);\n}\n\ninline void v_store_interleave( ushort* ptr, const v_uint16x8& a,\n                                const v_uint16x8& b,\n                                const v_uint16x8& c )\n{\n    __m128i z = _mm_setzero_si128();\n    __m128i ab0 = _mm_unpacklo_epi16(a.val, b.val);\n    __m128i ab1 = _mm_unpackhi_epi16(a.val, b.val);\n    __m128i c0 = _mm_unpacklo_epi16(c.val, z);\n    __m128i c1 = _mm_unpackhi_epi16(c.val, z);\n\n    __m128i p10 = _mm_unpacklo_epi32(ab0, c0);\n    __m128i p11 = _mm_unpackhi_epi32(ab0, c0);\n    __m128i p12 = _mm_unpacklo_epi32(ab1, c1);\n    __m128i p13 = _mm_unpackhi_epi32(ab1, c1);\n\n    __m128i p20 = _mm_unpacklo_epi64(p10, p11);\n    __m128i p21 = _mm_unpackhi_epi64(p10, p11);\n    __m128i p22 = _mm_unpacklo_epi64(p12, p13);\n    __m128i p23 = _mm_unpackhi_epi64(p12, p13);\n\n    p20 = _mm_slli_si128(p20, 2);\n    p22 = _mm_slli_si128(p22, 2);\n\n    __m128i p30 = _mm_unpacklo_epi64(p20, p21);\n    __m128i p31 = _mm_unpackhi_epi64(p20, p21);\n    __m128i p32 = _mm_unpacklo_epi64(p22, p23);\n    __m128i p33 = _mm_unpackhi_epi64(p22, p23);\n\n    __m128i v0 = _mm_or_si128(_mm_srli_si128(p30, 2), _mm_slli_si128(p31, 10));\n    __m128i v1 = _mm_or_si128(_mm_srli_si128(p31, 6), _mm_slli_si128(p32, 6));\n    __m128i v2 = _mm_or_si128(_mm_srli_si128(p32, 10), _mm_slli_si128(p33, 2));\n\n    _mm_storeu_si128((__m128i*)(ptr), v0);\n    _mm_storeu_si128((__m128i*)(ptr + 8), v1);\n    _mm_storeu_si128((__m128i*)(ptr + 16), v2);\n}\n\ninline void v_store_interleave( ushort* ptr, const v_uint16x8& a, const v_uint16x8& b,\n                                const v_uint16x8& c, const v_uint16x8& d)\n{\n    // a0 a1 a2 a3 ....\n    // b0 b1 b2 b3 ....\n    // c0 c1 c2 c3 ....\n    // d0 d1 d2 d3 ....\n    __m128i u0 = _mm_unpacklo_epi16(a.val, c.val); // a0 c0 a1 c1 ...\n    __m128i u1 = _mm_unpackhi_epi16(a.val, c.val); // a4 c4 a5 c5 ...\n    __m128i u2 = _mm_unpacklo_epi16(b.val, d.val); // b0 d0 b1 d1 ...\n    __m128i u3 = _mm_unpackhi_epi16(b.val, d.val); // b4 d4 b5 d5 ...\n\n    __m128i v0 = _mm_unpacklo_epi16(u0, u2); // a0 b0 c0 d0 ...\n    __m128i v1 = _mm_unpacklo_epi16(u1, u3); // a4 b4 c4 d4 ...\n    __m128i v2 = _mm_unpackhi_epi16(u0, u2); // a2 b2 c2 d2 ...\n    __m128i v3 = _mm_unpackhi_epi16(u1, u3); // a6 b6 c6 d6 ...\n\n    _mm_storeu_si128((__m128i*)ptr, v0);\n    _mm_storeu_si128((__m128i*)(ptr + 8), v2);\n    _mm_storeu_si128((__m128i*)(ptr + 16), v1);\n    _mm_storeu_si128((__m128i*)(ptr + 24), v3);\n}\n\ninline void v_store_interleave( unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b,\n                                const v_uint32x4& c )\n{\n    v_uint32x4 z = v_setzero_u32(), u0, u1, u2, u3;\n    v_transpose4x4(a, b, c, z, u0, u1, u2, u3);\n\n    __m128i v0 = _mm_or_si128(u0.val, _mm_slli_si128(u1.val, 12));\n    __m128i v1 = _mm_or_si128(_mm_srli_si128(u1.val, 4), _mm_slli_si128(u2.val, 8));\n    __m128i v2 = _mm_or_si128(_mm_srli_si128(u2.val, 8), _mm_slli_si128(u3.val, 4));\n\n    _mm_storeu_si128((__m128i*)ptr, v0);\n    _mm_storeu_si128((__m128i*)(ptr + 4), v1);\n    _mm_storeu_si128((__m128i*)(ptr + 8), v2);\n}\n\ninline void v_store_interleave(unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b,\n                               const v_uint32x4& c, const v_uint32x4& d)\n{\n    v_uint32x4 t0, t1, t2, t3;\n    v_transpose4x4(a, b, c, d, t0, t1, t2, t3);\n 
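   // after the 4x4 transpose, t0..t3 already hold the interleaved (a,b,c,d) quadruples, so plain sequential stores finish the job\n 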
   v_store(ptr, t0);\n    v_store(ptr + 4, t1);\n    v_store(ptr + 8, t2);\n    v_store(ptr + 12, t3);\n}\n\n#define OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(_Tpvec, _Tp, suffix, _Tpuvec, _Tpu, usuffix) \\\ninline void v_load_deinterleave( const _Tp* ptr, _Tpvec& a0, \\\n                                 _Tpvec& b0, _Tpvec& c0 ) \\\n{ \\\n    _Tpuvec a1, b1, c1; \\\n    v_load_deinterleave((const _Tpu*)ptr, a1, b1, c1); \\\n    a0 = v_reinterpret_as_##suffix(a1); \\\n    b0 = v_reinterpret_as_##suffix(b1); \\\n    c0 = v_reinterpret_as_##suffix(c1); \\\n} \\\ninline void v_load_deinterleave( const _Tp* ptr, _Tpvec& a0, \\\n                                 _Tpvec& b0, _Tpvec& c0, _Tpvec& d0 ) \\\n{ \\\n    _Tpuvec a1, b1, c1, d1; \\\n    v_load_deinterleave((const _Tpu*)ptr, a1, b1, c1, d1); \\\n    a0 = v_reinterpret_as_##suffix(a1); \\\n    b0 = v_reinterpret_as_##suffix(b1); \\\n    c0 = v_reinterpret_as_##suffix(c1); \\\n    d0 = v_reinterpret_as_##suffix(d1); \\\n} \\\ninline void v_store_interleave( _Tp* ptr, const _Tpvec& a0, \\\n                               const _Tpvec& b0, const _Tpvec& c0 ) \\\n{ \\\n    _Tpuvec a1 = v_reinterpret_as_##usuffix(a0); \\\n    _Tpuvec b1 = v_reinterpret_as_##usuffix(b0); \\\n    _Tpuvec c1 = v_reinterpret_as_##usuffix(c0); \\\n    v_store_interleave((_Tpu*)ptr, a1, b1, c1); \\\n} \\\ninline void v_store_interleave( _Tp* ptr, const _Tpvec& a0, const _Tpvec& b0, \\\n                               const _Tpvec& c0, const _Tpvec& d0 ) \\\n{ \\\n    _Tpuvec a1 = v_reinterpret_as_##usuffix(a0); \\\n    _Tpuvec b1 = v_reinterpret_as_##usuffix(b0); \\\n    _Tpuvec c1 = v_reinterpret_as_##usuffix(c0); \\\n    _Tpuvec d1 = v_reinterpret_as_##usuffix(d0); \\\n    v_store_interleave((_Tpu*)ptr, a1, b1, c1, d1); \\\n}\n\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_int8x16, schar, s8, v_uint8x16, uchar, u8)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_int16x8, short, s16, v_uint16x8, ushort, u16)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_int32x4, int, s32, v_uint32x4, unsigned, u32)\nOPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_float32x4, float, f32, v_uint32x4, unsigned, u32)\n\ninline v_float32x4 v_cvt_f32(const v_int32x4& a)\n{\n    return v_float32x4(_mm_cvtepi32_ps(a.val));\n}\n\ninline v_float32x4 v_cvt_f32(const v_float64x2& a)\n{\n    return v_float32x4(_mm_cvtpd_ps(a.val));\n}\n\ninline v_float64x2 v_cvt_f64(const v_int32x4& a)\n{\n    return v_float64x2(_mm_cvtepi32_pd(a.val));\n}\n\ninline v_float64x2 v_cvt_f64(const v_float32x4& a)\n{\n    return v_float64x2(_mm_cvtps_pd(a.val));\n}\n\n//! @endcond\n\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/ippasync.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2015, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_IPPASYNC_HPP__\n#define __OPENCV_CORE_IPPASYNC_HPP__\n\n#ifdef HAVE_IPP_A\n\n#include \"opencv2/core.hpp\"\n#include <ipp_async_op.h>\n#include <ipp_async_accel.h>\n\nnamespace cv\n{\n\nnamespace hpp\n{\n\n/** @addtogroup core_ipp\nThis section describes conversion between OpenCV and [Intel&reg; IPP Asynchronous\nC/C++](http://software.intel.com/en-us/intel-ipp-preview) library. [Getting Started\nGuide](http://registrationcenter.intel.com/irc_nas/3727/ipp_async_get_started.htm) help you to\ninstall the library, configure header and library build paths.\n */\n//! @{\n\n    //! convert OpenCV data type to hppDataType\n    inline int toHppType(const int cvType)\n    {\n        int depth = CV_MAT_DEPTH(cvType);\n        int hppType = depth == CV_8U ? HPP_DATA_TYPE_8U :\n                     depth == CV_16U ? HPP_DATA_TYPE_16U :\n                     depth == CV_16S ? HPP_DATA_TYPE_16S :\n                     depth == CV_32S ? HPP_DATA_TYPE_32S :\n                     depth == CV_32F ? HPP_DATA_TYPE_32F :\n                     depth == CV_64F ? HPP_DATA_TYPE_64F : -1;\n        CV_Assert( hppType >= 0 );\n        return hppType;\n    }\n\n    //! 
convert hppDataType to OpenCV data type\n    inline int toCvType(const int hppType)\n    {\n        int cvType = hppType == HPP_DATA_TYPE_8U ? CV_8U :\n                    hppType == HPP_DATA_TYPE_16U ? CV_16U :\n                    hppType == HPP_DATA_TYPE_16S ? CV_16S :\n                    hppType == HPP_DATA_TYPE_32S ? CV_32S :\n                    hppType == HPP_DATA_TYPE_32F ? CV_32F :\n                    hppType == HPP_DATA_TYPE_64F ? CV_64F : -1;\n        CV_Assert( cvType >= 0 );\n        return cvType;\n    }\n\n    /** @brief Convert hppiMatrix to Mat.\n\n    This function allocates and initializes a new matrix (if needed) that has the same size and type as the\n    input matrix. Supports CV_8U, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F.\n    @param src input hppiMatrix.\n    @param dst output matrix.\n    @param accel accelerator instance (see hpp::getHpp for the list of acceleration framework types).\n    @param cn number of channels.\n     */\n    inline void copyHppToMat(hppiMatrix* src, Mat& dst, hppAccel accel, int cn)\n    {\n        hppDataType type;\n        hpp32u width, height;\n        hppStatus sts;\n\n        if (src == NULL)\n            return dst.release();\n\n        sts = hppiInquireMatrix(src, &type, &width, &height);\n\n        CV_Assert( sts == HPP_STATUS_NO_ERROR);\n\n        int matType = CV_MAKETYPE(toCvType(type), cn);\n\n        CV_Assert(width%cn == 0);\n\n        width /= cn;\n\n        dst.create((int)height, (int)width, (int)matType);\n\n        size_t newSize = (size_t)(height*(hpp32u)(dst.step));\n\n        sts = hppiGetMatrixData(accel,src,(hpp32u)(dst.step),dst.data,&newSize);\n\n        CV_Assert( sts == HPP_STATUS_NO_ERROR);\n    }\n\n    /** @brief Create Mat from hppiMatrix.\n\n    This function allocates and initializes the Mat that has the same size and type as the input matrix.\n    Supports CV_8U, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F.\n    @param src input hppiMatrix.\n    @param accel accelerator instance (see hpp::getHpp for the list of acceleration framework types).\n    @param cn number of channels.\n    @sa howToUseIPPAconversion, hpp::copyHppToMat, hpp::getHpp.\n     */\n    inline Mat getMat(hppiMatrix* src, hppAccel accel, int cn)\n    {\n        Mat dst;\n        copyHppToMat(src, dst, accel, cn);\n        return dst;\n    }\n\n    /** @brief Create hppiMatrix from Mat.\n\n    This function allocates and initializes the hppiMatrix that has the same size and type as the input\n    matrix and returns the hppiMatrix*.\n\n    If you want to use zero-copy for GPU, you should have 4KB-aligned matrix data. See the details in\n    [hppiCreateSharedMatrix](http://software.intel.com/ru-ru/node/501697).\n\n    Supports CV_8U, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F.\n\n    @note The hppiMatrix pointer to the image buffer in system memory refers to src.data. Control\n    the lifetime of the matrix and don't change its data if there is no special need.\n    @param src input matrix.\n    @param accel accelerator instance. 
Supported types:\n    -   **HPP_ACCEL_TYPE_CPU** - accelerated by optimized CPU instructions.\n    -   **HPP_ACCEL_TYPE_GPU** - accelerated by GPU programmable units or fixed-function\n        accelerators.\n    -   **HPP_ACCEL_TYPE_ANY** - any acceleration or no acceleration available.\n    @sa howToUseIPPAconversion, hpp::getMat\n     */\n    inline hppiMatrix* getHpp(const Mat& src, hppAccel accel)\n    {\n        int htype = toHppType(src.type());\n        int cn = src.channels();\n\n        CV_Assert(src.data);\n        hppAccelType accelType = hppQueryAccelType(accel);\n\n        if (accelType!=HPP_ACCEL_TYPE_CPU)\n        {\n            hpp32u pitch, size;\n            hppQueryMatrixAllocParams(accel, src.cols*cn, src.rows, htype, &pitch, &size);\n            if (pitch!=0 && size!=0)\n                if ((int)(src.data)%4096==0 && pitch==(hpp32u)(src.step))\n                {\n                    return hppiCreateSharedMatrix(htype, src.cols*cn, src.rows, src.data, pitch, size);\n                }\n        }\n\n        return hppiCreateMatrix(htype, src.cols*cn, src.rows, src.data, (hpp32s)(src.step));\n    }\n\n//! @}\n}}\n\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/mat.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_MAT_HPP__\n#define __OPENCV_CORE_MAT_HPP__\n\n#ifndef __cplusplus\n#  error mat.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core/matx.hpp\"\n#include \"opencv2/core/types.hpp\"\n\n#include \"opencv2/core/bufferpool.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup core_basic\n//! @{\n\nenum { ACCESS_READ=1<<24, ACCESS_WRITE=1<<25,\n    ACCESS_RW=3<<24, ACCESS_MASK=ACCESS_RW, ACCESS_FAST=1<<26 };\n\nclass CV_EXPORTS _OutputArray;\n\n//////////////////////// Input/Output Array Arguments /////////////////////////////////\n\n/** @brief This is the proxy class for passing read-only input arrays into OpenCV functions.\n\nIt is defined as:\n@code\n    typedef const _InputArray& InputArray;\n@endcode\nwhere _InputArray is a class that can be constructed from `Mat`, `Mat_<T>`, `Matx<T, m, n>`,\n`std::vector<T>`, `std::vector<std::vector<T> >` or `std::vector<Mat>`. It can also be constructed\nfrom a matrix expression.\n\nSince this is mostly implementation-level class, and its interface may change in future versions, we\ndo not describe it in details. 
There are a few key things, though, that should be kept in mind:\n\n-   When you see in the reference manual or in OpenCV source code a function that takes\n    InputArray, it means that you can actually pass `Mat`, `Matx`, `vector<T>` etc. (see above the\n    complete list).\n-   Optional input arguments: If some of the input arrays may be empty, pass cv::noArray() (or\n    simply cv::Mat() as you probably did before).\n-   The class is designed solely for passing parameters. That is, normally you *should not*\n    declare class members, local and global variables of this type.\n-   If you want to design your own function or a class method that can operate on arrays of\n    multiple types, you can use InputArray (or OutputArray) for the respective parameters. Inside\n    a function you should use the _InputArray::getMat() method to construct a matrix header for the\n    array (without copying data). _InputArray::kind() can be used to distinguish Mat from\n    `vector<>` etc., but normally it is not needed.\n\nHere is how you can use a function that takes InputArray:\n@code\n    std::vector<Point2f> vec;\n    // points on a circle\n    for( int i = 0; i < 30; i++ )\n        vec.push_back(Point2f((float)(100 + 30*cos(i*CV_PI*2/5)),\n                              (float)(100 - 30*sin(i*CV_PI*2/5))));\n    cv::transform(vec, vec, cv::Matx23f(0.707, -0.707, 10, 0.707, 0.707, 20));\n@endcode\nThat is, we form an STL vector containing points, and apply an in-place affine transformation to the\nvector using the 2x3 matrix created inline as a `Matx<float, 2, 3>` instance.\n\nHere is how such a function can be implemented (for simplicity, we implement a very specific case of\nit, according to the assertion statement inside):\n@code\n    void myAffineTransform(InputArray _src, OutputArray _dst, InputArray _m)\n    {\n        // get Mat headers for input arrays. This is O(1) operation,\n        // unless _src and/or _m are matrix expressions.\n        Mat src = _src.getMat(), m = _m.getMat();\n        CV_Assert( src.type() == CV_32FC2 && m.type() == CV_32F && m.size() == Size(3, 2) );\n\n        // [re]create the output array so that it has the proper size and type.\n        // In case of Mat it calls Mat::create, in case of STL vector it calls vector::resize.\n        _dst.create(src.size(), src.type());\n        Mat dst = _dst.getMat();\n\n        for( int i = 0; i < src.rows; i++ )\n            for( int j = 0; j < src.cols; j++ )\n            {\n                Point2f pt = src.at<Point2f>(i, j);\n                dst.at<Point2f>(i, j) = Point2f(m.at<float>(0, 0)*pt.x +\n                                                m.at<float>(0, 1)*pt.y +\n                                                m.at<float>(0, 2),\n                                                m.at<float>(1, 0)*pt.x +\n                                                m.at<float>(1, 1)*pt.y +\n                                                m.at<float>(1, 2));\n            }\n    }\n@endcode\nThere is another related type, InputArrayOfArrays, which is currently defined as a synonym for\nInputArray:\n@code\n    typedef InputArray InputArrayOfArrays;\n@endcode\nIt denotes function arguments that are either vectors of vectors or vectors of matrices. A separate\nsynonym is needed to generate Python/Java etc. wrappers properly. 
At the function implementation\nlevel their use is similar, but _InputArray::getMat(idx) should be used to get header for the\nidx-th component of the outer vector and _InputArray::size().area() should be used to find the\nnumber of components (vectors/matrices) of the outer vector.\n */\nclass CV_EXPORTS _InputArray\n{\npublic:\n    enum {\n        KIND_SHIFT = 16,\n        FIXED_TYPE = 0x8000 << KIND_SHIFT,\n        FIXED_SIZE = 0x4000 << KIND_SHIFT,\n        KIND_MASK = 31 << KIND_SHIFT,\n\n        NONE              = 0 << KIND_SHIFT,\n        MAT               = 1 << KIND_SHIFT,\n        MATX              = 2 << KIND_SHIFT,\n        STD_VECTOR        = 3 << KIND_SHIFT,\n        STD_VECTOR_VECTOR = 4 << KIND_SHIFT,\n        STD_VECTOR_MAT    = 5 << KIND_SHIFT,\n        EXPR              = 6 << KIND_SHIFT,\n        OPENGL_BUFFER     = 7 << KIND_SHIFT,\n        CUDA_HOST_MEM     = 8 << KIND_SHIFT,\n        CUDA_GPU_MAT      = 9 << KIND_SHIFT,\n        UMAT              =10 << KIND_SHIFT,\n        STD_VECTOR_UMAT   =11 << KIND_SHIFT,\n        STD_BOOL_VECTOR   =12 << KIND_SHIFT,\n        STD_VECTOR_CUDA_GPU_MAT = 13 << KIND_SHIFT\n    };\n\n    _InputArray();\n    _InputArray(int _flags, void* _obj);\n    _InputArray(const Mat& m);\n    _InputArray(const MatExpr& expr);\n    _InputArray(const std::vector<Mat>& vec);\n    template<typename _Tp> _InputArray(const Mat_<_Tp>& m);\n    template<typename _Tp> _InputArray(const std::vector<_Tp>& vec);\n    _InputArray(const std::vector<bool>& vec);\n    template<typename _Tp> _InputArray(const std::vector<std::vector<_Tp> >& vec);\n    template<typename _Tp> _InputArray(const std::vector<Mat_<_Tp> >& vec);\n    template<typename _Tp> _InputArray(const _Tp* vec, int n);\n    template<typename _Tp, int m, int n> _InputArray(const Matx<_Tp, m, n>& matx);\n    _InputArray(const double& val);\n    _InputArray(const cuda::GpuMat& d_mat);\n    _InputArray(const std::vector<cuda::GpuMat>& d_mat_array);\n    _InputArray(const ogl::Buffer& buf);\n    _InputArray(const cuda::HostMem& cuda_mem);\n    template<typename _Tp> _InputArray(const cudev::GpuMat_<_Tp>& m);\n    _InputArray(const UMat& um);\n    _InputArray(const std::vector<UMat>& umv);\n\n    Mat getMat(int idx=-1) const;\n    Mat getMat_(int idx=-1) const;\n    UMat getUMat(int idx=-1) const;\n    void getMatVector(std::vector<Mat>& mv) const;\n    void getUMatVector(std::vector<UMat>& umv) const;\n    void getGpuMatVector(std::vector<cuda::GpuMat>& gpumv) const;\n    cuda::GpuMat getGpuMat() const;\n    ogl::Buffer getOGlBuffer() const;\n\n    int getFlags() const;\n    void* getObj() const;\n    Size getSz() const;\n\n    int kind() const;\n    int dims(int i=-1) const;\n    int cols(int i=-1) const;\n    int rows(int i=-1) const;\n    Size size(int i=-1) const;\n    int sizend(int* sz, int i=-1) const;\n    bool sameSize(const _InputArray& arr) const;\n    size_t total(int i=-1) const;\n    int type(int i=-1) const;\n    int depth(int i=-1) const;\n    int channels(int i=-1) const;\n    bool isContinuous(int i=-1) const;\n    bool isSubmatrix(int i=-1) const;\n    bool empty() const;\n    void copyTo(const _OutputArray& arr) const;\n    void copyTo(const _OutputArray& arr, const _InputArray & mask) const;\n    size_t offset(int i=-1) const;\n    size_t step(int i=-1) const;\n    bool isMat() const;\n    bool isUMat() const;\n    bool isMatVector() const;\n    bool isUMatVector() const;\n    bool isMatx() const;\n    bool isVector() const;\n    bool isGpuMatVector() const;\n    
~_InputArray();\n\nprotected:\n    int flags;\n    void* obj;\n    Size sz;\n\n    void init(int _flags, const void* _obj);\n    void init(int _flags, const void* _obj, Size _sz);\n};\n\n\n/** @brief This type is very similar to InputArray except that it is used for input/output and output function\nparameters.\n\nJust like with InputArray, OpenCV users should not care about OutputArray, they just pass `Mat`,\n`vector<T>` etc. to the functions. The same limitation as for `InputArray`: *Do not explicitly\ncreate OutputArray instances* applies here too.\n\nIf you want to make your function polymorphic (i.e. accept different arrays as output parameters),\nit is also not very difficult. Take the sample above as the reference. Note that\n_OutputArray::create() needs to be called before _OutputArray::getMat(). This way you guarantee\nthat the output array is properly allocated.\n\nOptional output parameters. If you do not need certain output array to be computed and returned to\nyou, pass cv::noArray(), just like you would in the case of optional input array. At the\nimplementation level, use _OutputArray::needed() to check if certain output array needs to be\ncomputed or not.\n\nThere are several synonyms for OutputArray that are used to assist automatic Python/Java/... wrapper\ngenerators:\n@code\n    typedef OutputArray OutputArrayOfArrays;\n    typedef OutputArray InputOutputArray;\n    typedef OutputArray InputOutputArrayOfArrays;\n@endcode\n */\nclass CV_EXPORTS _OutputArray : public _InputArray\n{\npublic:\n    enum\n    {\n        DEPTH_MASK_8U = 1 << CV_8U,\n        DEPTH_MASK_8S = 1 << CV_8S,\n        DEPTH_MASK_16U = 1 << CV_16U,\n        DEPTH_MASK_16S = 1 << CV_16S,\n        DEPTH_MASK_32S = 1 << CV_32S,\n        DEPTH_MASK_32F = 1 << CV_32F,\n        DEPTH_MASK_64F = 1 << CV_64F,\n        DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1,\n        DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S,\n        DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F\n    };\n\n    _OutputArray();\n    _OutputArray(int _flags, void* _obj);\n    _OutputArray(Mat& m);\n    _OutputArray(std::vector<Mat>& vec);\n    _OutputArray(cuda::GpuMat& d_mat);\n    _OutputArray(std::vector<cuda::GpuMat>& d_mat);\n    _OutputArray(ogl::Buffer& buf);\n    _OutputArray(cuda::HostMem& cuda_mem);\n    template<typename _Tp> _OutputArray(cudev::GpuMat_<_Tp>& m);\n    template<typename _Tp> _OutputArray(std::vector<_Tp>& vec);\n    _OutputArray(std::vector<bool>& vec);\n    template<typename _Tp> _OutputArray(std::vector<std::vector<_Tp> >& vec);\n    template<typename _Tp> _OutputArray(std::vector<Mat_<_Tp> >& vec);\n    template<typename _Tp> _OutputArray(Mat_<_Tp>& m);\n    template<typename _Tp> _OutputArray(_Tp* vec, int n);\n    template<typename _Tp, int m, int n> _OutputArray(Matx<_Tp, m, n>& matx);\n    _OutputArray(UMat& m);\n    _OutputArray(std::vector<UMat>& vec);\n\n    _OutputArray(const Mat& m);\n    _OutputArray(const std::vector<Mat>& vec);\n    _OutputArray(const cuda::GpuMat& d_mat);\n    _OutputArray(const std::vector<cuda::GpuMat>& d_mat);\n    _OutputArray(const ogl::Buffer& buf);\n    _OutputArray(const cuda::HostMem& cuda_mem);\n    template<typename _Tp> _OutputArray(const cudev::GpuMat_<_Tp>& m);\n    template<typename _Tp> _OutputArray(const std::vector<_Tp>& vec);\n    template<typename _Tp> _OutputArray(const std::vector<std::vector<_Tp> >& vec);\n    template<typename _Tp> _OutputArray(const std::vector<Mat_<_Tp> >& vec);\n    template<typename _Tp> _OutputArray(const Mat_<_Tp>& m);\n    
template<typename _Tp> _OutputArray(const _Tp* vec, int n);\n    template<typename _Tp, int m, int n> _OutputArray(const Matx<_Tp, m, n>& matx);\n    _OutputArray(const UMat& m);\n    _OutputArray(const std::vector<UMat>& vec);\n\n    bool fixedSize() const;\n    bool fixedType() const;\n    bool needed() const;\n    Mat& getMatRef(int i=-1) const;\n    UMat& getUMatRef(int i=-1) const;\n    cuda::GpuMat& getGpuMatRef() const;\n    std::vector<cuda::GpuMat>& getGpuMatVecRef() const;\n    ogl::Buffer& getOGlBufferRef() const;\n    cuda::HostMem& getHostMemRef() const;\n    void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;\n    void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;\n    void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;\n    void createSameSize(const _InputArray& arr, int mtype) const;\n    void release() const;\n    void clear() const;\n    void setTo(const _InputArray& value, const _InputArray & mask = _InputArray()) const;\n\n    void assign(const UMat& u) const;\n    void assign(const Mat& m) const;\n};\n\n\nclass CV_EXPORTS _InputOutputArray : public _OutputArray\n{\npublic:\n    _InputOutputArray();\n    _InputOutputArray(int _flags, void* _obj);\n    _InputOutputArray(Mat& m);\n    _InputOutputArray(std::vector<Mat>& vec);\n    _InputOutputArray(cuda::GpuMat& d_mat);\n    _InputOutputArray(ogl::Buffer& buf);\n    _InputOutputArray(cuda::HostMem& cuda_mem);\n    template<typename _Tp> _InputOutputArray(cudev::GpuMat_<_Tp>& m);\n    template<typename _Tp> _InputOutputArray(std::vector<_Tp>& vec);\n    _InputOutputArray(std::vector<bool>& vec);\n    template<typename _Tp> _InputOutputArray(std::vector<std::vector<_Tp> >& vec);\n    template<typename _Tp> _InputOutputArray(std::vector<Mat_<_Tp> >& vec);\n    template<typename _Tp> _InputOutputArray(Mat_<_Tp>& m);\n    template<typename _Tp> _InputOutputArray(_Tp* vec, int n);\n    template<typename _Tp, int m, int n> _InputOutputArray(Matx<_Tp, m, n>& matx);\n    _InputOutputArray(UMat& m);\n    _InputOutputArray(std::vector<UMat>& vec);\n\n    _InputOutputArray(const Mat& m);\n    _InputOutputArray(const std::vector<Mat>& vec);\n    _InputOutputArray(const cuda::GpuMat& d_mat);\n    _InputOutputArray(const std::vector<cuda::GpuMat>& d_mat);\n    _InputOutputArray(const ogl::Buffer& buf);\n    _InputOutputArray(const cuda::HostMem& cuda_mem);\n    template<typename _Tp> _InputOutputArray(const cudev::GpuMat_<_Tp>& m);\n    template<typename _Tp> _InputOutputArray(const std::vector<_Tp>& vec);\n    template<typename _Tp> _InputOutputArray(const std::vector<std::vector<_Tp> >& vec);\n    template<typename _Tp> _InputOutputArray(const std::vector<Mat_<_Tp> >& vec);\n    template<typename _Tp> _InputOutputArray(const Mat_<_Tp>& m);\n    template<typename _Tp> _InputOutputArray(const _Tp* vec, int n);\n    template<typename _Tp, int m, int n> _InputOutputArray(const Matx<_Tp, m, n>& matx);\n    _InputOutputArray(const UMat& m);\n    _InputOutputArray(const std::vector<UMat>& vec);\n};\n\ntypedef const _InputArray& InputArray;\ntypedef InputArray InputArrayOfArrays;\ntypedef const _OutputArray& OutputArray;\ntypedef OutputArray OutputArrayOfArrays;\ntypedef const _InputOutputArray& InputOutputArray;\ntypedef InputOutputArray InputOutputArrayOfArrays;\n\nCV_EXPORTS InputOutputArray noArray();\n\n/////////////////////////////////// MatAllocator 
//////////////////////////////////////\n\n//! Usage flags for allocator\nenum UMatUsageFlags\n{\n    USAGE_DEFAULT = 0,\n\n    // buffer allocation policy is platform and usage specific\n    USAGE_ALLOCATE_HOST_MEMORY = 1 << 0,\n    USAGE_ALLOCATE_DEVICE_MEMORY = 1 << 1,\n    USAGE_ALLOCATE_SHARED_MEMORY = 1 << 2, // It is not equal to: USAGE_ALLOCATE_HOST_MEMORY | USAGE_ALLOCATE_DEVICE_MEMORY\n\n    __UMAT_USAGE_FLAGS_32BIT = 0x7fffffff // Binary compatibility hint\n};\n\nstruct CV_EXPORTS UMatData;\n\n/** @brief  Custom array allocator\n*/\nclass CV_EXPORTS MatAllocator\n{\npublic:\n    MatAllocator() {}\n    virtual ~MatAllocator() {}\n\n    // let's comment it off for now to detect and fix all the uses of allocator\n    //virtual void allocate(int dims, const int* sizes, int type, int*& refcount,\n    //                      uchar*& datastart, uchar*& data, size_t* step) = 0;\n    //virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0;\n    virtual UMatData* allocate(int dims, const int* sizes, int type,\n                               void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const = 0;\n    virtual bool allocate(UMatData* data, int accessflags, UMatUsageFlags usageFlags) const = 0;\n    virtual void deallocate(UMatData* data) const = 0;\n    virtual void map(UMatData* data, int accessflags) const;\n    virtual void unmap(UMatData* data) const;\n    virtual void download(UMatData* data, void* dst, int dims, const size_t sz[],\n                          const size_t srcofs[], const size_t srcstep[],\n                          const size_t dststep[]) const;\n    virtual void upload(UMatData* data, const void* src, int dims, const size_t sz[],\n                        const size_t dstofs[], const size_t dststep[],\n                        const size_t srcstep[]) const;\n    virtual void copy(UMatData* srcdata, UMatData* dstdata, int dims, const size_t sz[],\n                      const size_t srcofs[], const size_t srcstep[],\n                      const size_t dstofs[], const size_t dststep[], bool sync) const;\n\n    // default implementation returns DummyBufferPoolController\n    virtual BufferPoolController* getBufferPoolController(const char* id = NULL) const;\n};\n\n\n//////////////////////////////// MatCommaInitializer //////////////////////////////////\n\n/** @brief  Comma-separated Matrix Initializer\n\n The class instances are usually not created explicitly.\n Instead, they are created by the \"matrix << firstValue\" operator.\n\n The sample below initializes a 2x2 rotation matrix:\n\n \\code\n double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180);\n Mat R = (Mat_<double>(2,2) << a, -b, b, a);\n \\endcode\n*/\ntemplate<typename _Tp> class MatCommaInitializer_\n{\npublic:\n    //! the constructor, created by the \"matrix << firstValue\" operator, where matrix is cv::Mat\n    MatCommaInitializer_(Mat_<_Tp>* _m);\n    //! the operator that takes the next value and puts it into the matrix\n    template<typename T2> MatCommaInitializer_<_Tp>& operator , (T2 v);\n    //! 
another form of conversion operator\n    operator Mat_<_Tp>() const;\nprotected:\n    MatIterator_<_Tp> it;\n};\n\n\n/////////////////////////////////////// Mat ///////////////////////////////////////////\n\n// note that umatdata might be allocated together\n// with the matrix data, not as a separate object.\n// therefore, it does not have constructor or destructor;\n// it should be explicitly initialized using init().\nstruct CV_EXPORTS UMatData\n{\n    enum { COPY_ON_MAP=1, HOST_COPY_OBSOLETE=2,\n        DEVICE_COPY_OBSOLETE=4, TEMP_UMAT=8, TEMP_COPIED_UMAT=24,\n        USER_ALLOCATED=32, DEVICE_MEM_MAPPED=64};\n    UMatData(const MatAllocator* allocator);\n    ~UMatData();\n\n    // provide atomic access to the structure\n    void lock();\n    void unlock();\n\n    bool hostCopyObsolete() const;\n    bool deviceCopyObsolete() const;\n    bool deviceMemMapped() const;\n    bool copyOnMap() const;\n    bool tempUMat() const;\n    bool tempCopiedUMat() const;\n    void markHostCopyObsolete(bool flag);\n    void markDeviceCopyObsolete(bool flag);\n    void markDeviceMemMapped(bool flag);\n\n    const MatAllocator* prevAllocator;\n    const MatAllocator* currAllocator;\n    int urefcount;\n    int refcount;\n    uchar* data;\n    uchar* origdata;\n    size_t size;\n\n    int flags;\n    void* handle;\n    void* userdata;\n    int allocatorFlags_;\n    int mapcount;\n    UMatData* originalUMatData;\n};\n\n\nstruct CV_EXPORTS UMatDataAutoLock\n{\n    explicit UMatDataAutoLock(UMatData* u);\n    ~UMatDataAutoLock();\n    UMatData* u;\n};\n\n\nstruct CV_EXPORTS MatSize\n{\n    explicit MatSize(int* _p);\n    Size operator()() const;\n    const int& operator[](int i) const;\n    int& operator[](int i);\n    operator const int*() const;\n    bool operator == (const MatSize& sz) const;\n    bool operator != (const MatSize& sz) const;\n\n    int* p;\n};\n\nstruct CV_EXPORTS MatStep\n{\n    MatStep();\n    explicit MatStep(size_t s);\n    const size_t& operator[](int i) const;\n    size_t& operator[](int i);\n    operator size_t() const;\n    MatStep& operator = (size_t s);\n\n    size_t* p;\n    size_t buf[2];\nprotected:\n    MatStep& operator = (const MatStep&);\n};\n\n/** @example cout_mat.cpp\nAn example demonstrating the serial out capabilities of cv::Mat\n*/\n\n /** @brief n-dimensional dense array class\n\nThe class Mat represents an n-dimensional dense numerical single-channel or multi-channel array. It\ncan be used to store real or complex-valued vectors and matrices, grayscale or color images, voxel\nvolumes, vector fields, point clouds, tensors, histograms (though, very high-dimensional histograms\nmay be better stored in a SparseMat ). The data layout of the array `M` is defined by the array\n`M.step[]`, so that the address of element \\f$(i_0,...,i_{M.dims-1})\\f$, where \\f$0\\leq i_k<M.size[k]\\f$, is\ncomputed as:\n\\f[addr(M_{i_0,...,i_{M.dims-1}}) = M.data + M.step[0]*i_0 + M.step[1]*i_1 + ... + M.step[M.dims-1]*i_{M.dims-1}\\f]\nIn case of a 2-dimensional array, the above formula is reduced to:\n\\f[addr(M_{i,j}) = M.data + M.step[0]*i + M.step[1]*j\\f]\nNote that `M.step[i] >= M.step[i+1]` (in fact, `M.step[i] >= M.step[i+1]*M.size[i+1]` ). This means\nthat 2-dimensional matrices are stored row-by-row, 3-dimensional matrices are stored plane-by-plane,\nand so on. M.step[M.dims-1] is minimal and always equal to the element size M.elemSize() .\n\nSo, the data layout in Mat is fully compatible with CvMat, IplImage, and CvMatND types from OpenCV\n1.x. 
It is also compatible with the majority of dense array types from the standard toolkits and\nSDKs, such as Numpy (ndarray), Win32 (device-independent bitmaps), and others, that is, with any\narray that uses *steps* (or *strides*) to compute the position of a pixel. Due to this\ncompatibility, it is possible to make a Mat header for user-allocated data and process it in-place\nusing OpenCV functions.\n\nThere are many different ways to create a Mat object. The most popular options are listed below:\n\n- Use the create(nrows, ncols, type) method or the similar Mat(nrows, ncols, type[, fillValue])\nconstructor. A new array of the specified size and type is allocated. type has the same meaning as\nin the cvCreateMat method. For example, CV_8UC1 means an 8-bit single-channel array, CV_32FC2\nmeans a 2-channel (complex) floating-point array, and so on.\n@code\n    // make a 7x7 complex matrix filled with 1+3j.\n    Mat M(7,7,CV_32FC2,Scalar(1,3));\n    // and now turn M to a 100x60 15-channel 8-bit matrix.\n    // The old content will be deallocated\n    M.create(100,60,CV_8UC(15));\n@endcode\nAs noted in the introduction to this chapter, create() allocates a new array only when the shape\nor type of the current array differs from the specified ones.\n\n- Create a multi-dimensional array:\n@code\n    // create a 100x100x100 8-bit array\n    int sz[] = {100, 100, 100};\n    Mat bigCube(3, sz, CV_8U, Scalar::all(0));\n@endcode\nIf you pass the number of dimensions = 1 to the Mat constructor, the created array will still be\n2-dimensional, with the number of columns set to 1. So, Mat::dims is always \\>= 2 (it can also be 0\nwhen the array is empty).\n\n- Use a copy constructor or assignment operator where there can be an array or expression on the\nright side (see below). As noted in the introduction, the array assignment is an O(1) operation\nbecause it only copies the header and increases the reference counter. The Mat::clone() method can\nbe used to get a full (deep) copy of the array when you need it.\n\n- Construct a header for a part of another array. It can be a single row, single column, several\nrows, several columns, rectangular region in the array (called a *minor* in algebra) or a\ndiagonal. Such operations are also O(1) because the new header references the same data. 
You can\nactually modify a part of the array using this feature, for example:\n@code\n    // add the 5-th row, multiplied by 3, to the 3rd row\n    M.row(3) = M.row(3) + M.row(5)*3;\n    // now copy the 7-th column to the 1-st column\n    // M.col(1) = M.col(7); // this will not work\n    Mat M1 = M.col(1);\n    M.col(7).copyTo(M1);\n    // create a new 320x240 image\n    Mat img(Size(320,240),CV_8UC3);\n    // select a ROI\n    Mat roi(img, Rect(10,10,100,100));\n    // fill the ROI with (0,255,0) (which is green in RGB space);\n    // the original 320x240 image will be modified\n    roi = Scalar(0,255,0);\n@endcode\nDue to the additional datastart and dataend members, it is possible to compute a relative\nsub-array position in the main *container* array using locateROI():\n@code\n    Mat A = Mat::eye(10, 10, CV_32S);\n    // extracts A columns, 1 (inclusive) to 3 (exclusive).\n    Mat B = A(Range::all(), Range(1, 3));\n    // extracts B rows, 5 (inclusive) to 9 (exclusive).\n    // that is, C \\~ A(Range(5, 9), Range(1, 3))\n    Mat C = B(Range(5, 9), Range::all());\n    Size size; Point ofs;\n    C.locateROI(size, ofs);\n    // size will be (width=10,height=10) and the ofs will be (x=1, y=5)\n@endcode\nAs in the case of whole matrices, if you need a deep copy, use the `clone()` method of the extracted\nsub-matrices.\n\n- Make a header for user-allocated data. It can be useful to do the following:\n    -# Process \"foreign\" data using OpenCV (for example, when you implement a DirectShow\\* filter or\n    a processing module for gstreamer, and so on). For example:\n    @code\n        void process_video_frame(const unsigned char* pixels,\n                                 int width, int height, int step)\n        {\n            Mat img(height, width, CV_8UC3, pixels, step);\n            GaussianBlur(img, img, Size(7,7), 1.5, 1.5);\n        }\n    @endcode\n    -# Quickly initialize small matrices and/or get a super-fast element access.\n    @code\n        double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}};\n        Mat M = Mat(3, 3, CV_64F, m).inv();\n    @endcode\n    .\n    Partial yet very common cases of this *user-allocated data* case are conversions from CvMat and\n    IplImage to Mat. For this purpose, there is a function cv::cvarrToMat taking pointers to CvMat or\n    IplImage and the optional flag indicating whether to copy the data or not.\n    @snippet samples/cpp/image.cpp iplimage\n\n- Use MATLAB-style array initializers, zeros(), ones(), eye(), for example:\n@code\n    // create a double-precision identity matrix and add it to M.\n    M += Mat::eye(M.rows, M.cols, CV_64F);\n@endcode\n\n- Use a comma-separated initializer:\n@code\n    // create a 3x3 double-precision identity matrix\n    Mat M = (Mat_<double>(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);\n@endcode\nWith this approach, you first call a constructor of the Mat class with the proper parameters, and\nthen you just use the `<<` operator followed by comma-separated values that can be constants,\nvariables, expressions, and so on. Also, note the extra parentheses required to avoid compilation\nerrors.\n\nOnce the array is created, it is automatically managed via a reference-counting mechanism. If the\narray header is built on top of user-allocated data, you should handle the data by yourself. The\narray data is deallocated when no one points to it. 
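For example, here is a minimal sketch of the sharing semantics (the names A, B, C are illustrative):\n@code\n    Mat A(100, 100, CV_32F);   // a new 100x100 float array is allocated, refcount = 1\n    Mat B = A;                 // O(1) header copy: B shares A's data, refcount becomes 2\n    Mat C = B.clone();         // deep copy: C gets its own buffer, independent of A and B\n@endcode\n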
If you want to release the data pointed to by an\narray header before the array destructor is called, use Mat::release().\n\nThe next important thing to learn about the array class is element access. This manual has already\ndescribed how to compute an address of each array element. Normally, you are not required to use the\nformula directly in the code. If you know the array element type (which can be retrieved using the\nmethod Mat::type() ), you can access the element \\f$M_{ij}\\f$ of a 2-dimensional array as:\n@code\n    M.at<double>(i,j) += 1.f;\n@endcode\nassuming that `M` is a double-precision floating-point array. There are several variants of the method\nat for different numbers of dimensions.\n\nIf you need to process a whole row of a 2D array, the most efficient way is to get the pointer to\nthe row first, and then just use the plain C operator []:\n@code\n    // compute sum of positive matrix elements\n    // (assuming that M is a double-precision matrix)\n    double sum=0;\n    for(int i = 0; i < M.rows; i++)\n    {\n        const double* Mi = M.ptr<double>(i);\n        for(int j = 0; j < M.cols; j++)\n            sum += std::max(Mi[j], 0.);\n    }\n@endcode\nSome operations, like the one above, do not actually depend on the array shape. They just process\nelements of an array one by one (or elements from multiple arrays that have the same coordinates,\nfor example, array addition). Such operations are called *element-wise*. It makes sense to check\nwhether all the input/output arrays are continuous, namely, have no gaps at the end of each row. If\nso, process them as a single long row:\n@code\n    // compute the sum of positive matrix elements, optimized variant\n    double sum=0;\n    int cols = M.cols, rows = M.rows;\n    if(M.isContinuous())\n    {\n        cols *= rows;\n        rows = 1;\n    }\n    for(int i = 0; i < rows; i++)\n    {\n        const double* Mi = M.ptr<double>(i);\n        for(int j = 0; j < cols; j++)\n            sum += std::max(Mi[j], 0.);\n    }\n@endcode\nIn the case of a continuous matrix, the outer loop body is executed just once. So, the overhead is\nsmaller, which is especially noticeable in the case of small matrices.\n\nFinally, there are STL-style iterators that are smart enough to skip gaps between successive rows:\n@code\n    // compute sum of positive matrix elements, iterator-based variant\n    double sum=0;\n    MatConstIterator_<double> it = M.begin<double>(), it_end = M.end<double>();\n    for(; it != it_end; ++it)\n        sum += std::max(*it, 0.);\n@endcode\nThe matrix iterators are random-access iterators, so they can be passed to any STL algorithm,\nincluding std::sort().\n*/\nclass CV_EXPORTS Mat\n{\npublic:\n    /**\n    These are various constructors that form a matrix. As noted in the AutomaticAllocation, often\n    the default constructor is enough, and the proper matrix will be allocated by an OpenCV function.\n    The constructed matrix can further be assigned to another matrix or matrix expression or can be\n    allocated with Mat::create . In the former case, the old content is de-referenced.\n     */\n    Mat();\n\n    /** @overload\n    @param rows Number of rows in a 2D array.\n    @param cols Number of columns in a 2D array.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    */\n    Mat(int rows, int cols, int type);\n\n    /** @overload\n    @param size 2D array size: Size(cols, rows) . 
In the Size() constructor, the number of rows and the\n    number of columns go in the reverse order.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n      */\n    Mat(Size size, int type);\n\n    /** @overload\n    @param rows Number of rows in a 2D array.\n    @param cols Number of columns in a 2D array.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    @param s An optional value to initialize each matrix element with. To set all the matrix elements to\n    the particular value after the construction, use the assignment operator\n    Mat::operator=(const Scalar& value) .\n    */\n    Mat(int rows, int cols, int type, const Scalar& s);\n\n    /** @overload\n    @param size 2D array size: Size(cols, rows) . In the Size() constructor, the number of rows and the\n    number of columns go in the reverse order.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    @param s An optional value to initialize each matrix element with. To set all the matrix elements to\n    the particular value after the construction, use the assignment operator\n    Mat::operator=(const Scalar& value) .\n      */\n    Mat(Size size, int type, const Scalar& s);\n\n    /** @overload\n    @param ndims Array dimensionality.\n    @param sizes Array of integers specifying an n-dimensional array shape.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    */\n    Mat(int ndims, const int* sizes, int type);\n\n    /** @overload\n    @param ndims Array dimensionality.\n    @param sizes Array of integers specifying an n-dimensional array shape.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    @param s An optional value to initialize each matrix element with. To set all the matrix elements to\n    the particular value after the construction, use the assignment operator\n    Mat::operator=(const Scalar& value) .\n    */\n    Mat(int ndims, const int* sizes, int type, const Scalar& s);\n\n    /** @overload\n    @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied\n    by these constructors. Instead, the header pointing to m data or its sub-array is constructed and\n    associated with it. The reference counter, if any, is incremented. So, when you modify the matrix\n    formed using such a constructor, you also modify the corresponding elements of m . If you want to\n    have an independent copy of the sub-array, use Mat::clone() .\n    */\n    Mat(const Mat& m);\n\n    /** @overload\n    @param rows Number of rows in a 2D array.\n    @param cols Number of columns in a 2D array.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    @param data Pointer to the user data. Matrix constructors that take data and step parameters do not\n    allocate matrix data. 
Instead, they just initialize the matrix header that points to the specified\n    data, which means that no data is copied. This operation is very efficient and can be used to\n    process external data using OpenCV functions. The external data is not automatically deallocated, so\n    you should take care of it.\n    @param step Number of bytes each matrix row occupies. The value should include the padding bytes at\n    the end of each row, if any. If the parameter is missing (set to AUTO_STEP ), no padding is assumed\n    and the actual step is calculated as cols*elemSize(). See Mat::elemSize.\n    */\n    Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP);\n\n    /** @overload\n    @param size 2D array size: Size(cols, rows) . In the Size() constructor, the number of rows and the\n    number of columns go in the reverse order.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    @param data Pointer to the user data. Matrix constructors that take data and step parameters do not\n    allocate matrix data. Instead, they just initialize the matrix header that points to the specified\n    data, which means that no data is copied. This operation is very efficient and can be used to\n    process external data using OpenCV functions. The external data is not automatically deallocated, so\n    you should take care of it.\n    @param step Number of bytes each matrix row occupies. The value should include the padding bytes at\n    the end of each row, if any. If the parameter is missing (set to AUTO_STEP ), no padding is assumed\n    and the actual step is calculated as cols*elemSize(). See Mat::elemSize.\n    */\n    Mat(Size size, int type, void* data, size_t step=AUTO_STEP);\n\n    /** @overload\n    @param ndims Array dimensionality.\n    @param sizes Array of integers specifying an n-dimensional array shape.\n    @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or\n    CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.\n    @param data Pointer to the user data. Matrix constructors that take data and step parameters do not\n    allocate matrix data. Instead, they just initialize the matrix header that points to the specified\n    data, which means that no data is copied. This operation is very efficient and can be used to\n    process external data using OpenCV functions. The external data is not automatically deallocated, so\n    you should take care of it.\n    @param steps Array of ndims-1 steps in case of a multi-dimensional array (the last step is always\n    set to the element size). If not specified, the matrix is assumed to be continuous.\n    */\n    Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0);\n\n    /** @overload\n    @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied\n    by these constructors. Instead, the header pointing to m data or its sub-array is constructed and\n    associated with it. The reference counter, if any, is incremented. So, when you modify the matrix\n    formed using such a constructor, you also modify the corresponding elements of m . If you want to\n    have an independent copy of the sub-array, use Mat::clone() .\n    @param rowRange Range of the m rows to take. As usual, the range start is inclusive and the range\n    end is exclusive. 
Use Range::all() to take all the rows.\n    @param colRange Range of the m columns to take. Use Range::all() to take all the columns.\n    */\n    Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all());\n\n    /** @overload\n    @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied\n    by these constructors. Instead, the header pointing to m data or its sub-array is constructed and\n    associated with it. The reference counter, if any, is incremented. So, when you modify the matrix\n    formed using such a constructor, you also modify the corresponding elements of m . If you want to\n    have an independent copy of the sub-array, use Mat::clone() .\n    @param roi Region of interest.\n    */\n    Mat(const Mat& m, const Rect& roi);\n\n    /** @overload\n    @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied\n    by these constructors. Instead, the header pointing to m data or its sub-array is constructed and\n    associated with it. The reference counter, if any, is incremented. So, when you modify the matrix\n    formed using such a constructor, you also modify the corresponding elements of m . If you want to\n    have an independent copy of the sub-array, use Mat::clone() .\n    @param ranges Array of selected ranges of m along each dimensionality.\n    */\n    Mat(const Mat& m, const Range* ranges);\n\n    /** @overload\n    @param vec STL vector whose elements form the matrix. The matrix has a single column and the number\n    of rows equal to the number of vector elements. Type of the matrix matches the type of vector\n    elements. The constructor can handle arbitrary types, for which there is a properly declared\n    DataType . This means that the vector elements must be primitive numbers or uni-type numerical\n    tuples of numbers. Mixed-type structures are not supported. The corresponding constructor is\n    explicit. Since STL vectors are not automatically converted to Mat instances, you should write\n    Mat(vec) explicitly. Unless you copy the data into the matrix ( copyData=true ), no new elements\n    should be added to the vector because doing so can potentially yield vector data reallocation and,\n    thus, invalidate the matrix data pointer.\n    @param copyData Flag to specify whether the underlying data of the STL vector should be copied\n    to (true) or shared with (false) the newly constructed matrix. When the data is copied, the\n    allocated buffer is managed using the Mat reference counting mechanism. While the data is shared,\n    the reference counter is NULL, and you must not deallocate the data until the matrix is\n    destructed.\n    */\n    template<typename _Tp> explicit Mat(const std::vector<_Tp>& vec, bool copyData=false);\n\n    /** @overload\n    */\n    template<typename _Tp, int n> explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true);\n\n    /** @overload\n    */\n    template<typename _Tp, int m, int n> explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true);\n\n    /** @overload\n    */\n    template<typename _Tp> explicit Mat(const Point_<_Tp>& pt, bool copyData=true);\n\n    /** @overload\n    */\n    template<typename _Tp> explicit Mat(const Point3_<_Tp>& pt, bool copyData=true);\n\n    /** @overload\n    */\n    template<typename _Tp> explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer);\n\n    //! download data from GpuMat\n    explicit Mat(const cuda::GpuMat& m);\n\n    //! 
destructor - calls release()\n    ~Mat();\n\n    /** @brief assignment operators\n\n    These are available assignment operators. Since they are all very different, make sure to read the\n    operator parameters description.\n    @param m Assigned, right-hand-side matrix. Matrix assignment is an O(1) operation. This means that\n    no data is copied but the data is shared and the reference counter, if any, is incremented. Before\n    assigning new data, the old data is de-referenced via Mat::release .\n     */\n    Mat& operator = (const Mat& m);\n\n    /** @overload\n    @param expr Assigned matrix expression object. As opposed to the first form of the assignment\n    operation, the second form can reuse an already allocated matrix if it has the right size and type to\n    fit the matrix expression result. It is automatically handled by the real function that the matrix\n    expression is expanded to. For example, C=A+B is expanded to add(A, B, C), and add takes care of\n    automatic C reallocation.\n    */\n    Mat& operator = (const MatExpr& expr);\n\n    //! retrieve UMat from Mat\n    UMat getUMat(int accessFlags, UMatUsageFlags usageFlags = USAGE_DEFAULT) const;\n\n    /** @brief Creates a matrix header for the specified matrix row.\n\n    The method makes a new header for the specified matrix row and returns it. This is an O(1)\n    operation, regardless of the matrix size. The underlying data of the new matrix is shared with the\n    original matrix. Here is the example of one of the classical basic matrix processing operations,\n    axpy, used by LU and many other algorithms:\n    @code\n        inline void matrix_axpy(Mat& A, int i, int j, double alpha)\n        {\n            A.row(i) += A.row(j)*alpha;\n        }\n    @endcode\n    @note In the current implementation, the following code does not work as expected:\n    @code\n        Mat A;\n        ...\n        A.row(i) = A.row(j); // will not work\n    @endcode\n    This happens because A.row(i) forms a temporary header that is further assigned to another header.\n    Remember that each of these operations is O(1), that is, no data is copied. Thus, the above\n    assignment does not do what you might have expected: the j-th row is not copied to the i-th row. To\n    achieve that, you should either turn this simple assignment into an expression or use the\n    Mat::copyTo method:\n    @code\n        Mat A;\n        ...\n        // works, but looks a bit obscure.\n        A.row(i) = A.row(j) + 0;\n        // this is a bit longer, but the recommended method.\n        A.row(j).copyTo(A.row(i));\n    @endcode\n    @param y A 0-based row index.\n     */\n    Mat row(int y) const;\n\n    /** @brief Creates a matrix header for the specified matrix column.\n\n    The method makes a new header for the specified matrix column and returns it. This is an O(1)\n    operation, regardless of the matrix size. The underlying data of the new matrix is shared with the\n    original matrix. See also the Mat::row description.\n    @param x A 0-based column index.\n     */\n    Mat col(int x) const;\n\n    /** @brief Creates a matrix header for the specified row span.\n\n    The method makes a new header for the specified row span of the matrix. 
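For instance, a brief sketch (the names are hypothetical):\n    @code\n        Mat A = Mat::ones(10, 10, CV_8U);\n        Mat top = A.rowRange(0, 5); // header for rows 0..4; the data is shared with A\n        top.setTo(Scalar(0));       // zeros the first five rows of A as well\n    @endcode\n    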
Similarly to Mat::row and\n    Mat::col , this is an O(1) operation.\n    @param startrow An inclusive 0-based start index of the row span.\n    @param endrow An exclusive 0-based ending index of the row span.\n     */\n    Mat rowRange(int startrow, int endrow) const;\n\n    /** @overload\n    @param r Range structure containing both the start and the end indices.\n    */\n    Mat rowRange(const Range& r) const;\n\n    /** @brief Creates a matrix header for the specified column span.\n\n    The method makes a new header for the specified column span of the matrix. Similarly to Mat::row and\n    Mat::col , this is an O(1) operation.\n    @param startcol An inclusive 0-based start index of the column span.\n    @param endcol An exclusive 0-based ending index of the column span.\n     */\n    Mat colRange(int startcol, int endcol) const;\n\n    /** @overload\n    @param r Range structure containing both the start and the end indices.\n    */\n    Mat colRange(const Range& r) const;\n\n    /** @brief Extracts a diagonal from a matrix\n\n    The method makes a new header for the specified matrix diagonal. The new matrix is represented as a\n    single-column matrix. Similarly to Mat::row and Mat::col, this is an O(1) operation.\n    @param d index of the diagonal, with the following values:\n    - `d=0` is the main diagonal.\n    - `d>0` is a diagonal from the lower half. For example, d=1 means the diagonal is set\n      immediately below the main one.\n    - `d<0` is a diagonal from the upper half. For example, d=-1 means the diagonal is set\n      immediately above the main one.\n     */\n    Mat diag(int d=0) const;\n\n    /** @brief creates a diagonal matrix\n\n    The method creates a square diagonal matrix whose main diagonal is formed by the specified\n    single-column matrix d.\n    @param d Single-column matrix that forms a diagonal matrix\n     */\n    static Mat diag(const Mat& d);\n\n    /** @brief Creates a full copy of the array and the underlying data.\n\n    The method creates a full copy of the array. The original step[] is not taken into account. So, the\n    array copy is a continuous array occupying total()*elemSize() bytes.\n     */\n    Mat clone() const;\n\n    /** @brief Copies the matrix to another one.\n\n    The method copies the matrix data to another matrix. Before copying the data, the method invokes:\n    @code\n        m.create(this->size(), this->type());\n    @endcode\n    so that the destination matrix is reallocated if needed. While m.copyTo(m); works flawlessly, the\n    function does not handle the case of a partial overlap between the source and the destination\n    matrices.\n\n    When the operation mask is specified, if the Mat::create call shown above reallocates the matrix,\n    the newly allocated matrix is initialized with all zeros before copying the data.\n    @param m Destination matrix. If it does not have a proper size or type before the operation, it is\n    reallocated.\n     */\n    void copyTo( OutputArray m ) const;\n\n    /** @overload\n    @param m Destination matrix. If it does not have a proper size or type before the operation, it is\n    reallocated.\n    @param mask Operation mask. 
Its non-zero elements indicate which matrix elements need to be copied.\n    The mask has to be of type CV_8U and can have 1 or multiple channels.\n    */\n    void copyTo( OutputArray m, InputArray mask ) const;\n\n    /** @brief Converts an array to another data type with optional scaling.\n\n    The method converts source pixel values to the target data type. saturate_cast\<\> is applied at\n    the end to avoid possible overflows:\n\n    \f[m(x,y) = saturate\_cast<rType>( \alpha (*this)(x,y) + \beta )\f]\n    @param m output matrix; if it does not have a proper size or type before the operation, it is\n    reallocated.\n    @param rtype desired output matrix type or, rather, the depth, since the number of channels is the\n    same as in the input; if rtype is negative, the output matrix will have the same type as the input.\n    @param alpha optional scale factor.\n    @param beta optional delta added to the scaled values.\n     */\n    void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const;\n\n    /** @brief Provides a functional form of convertTo.\n\n    This is an internally used method called by the @ref MatrixExpressions engine.\n    @param m Destination array.\n    @param type Desired destination array depth (or -1 if it should be the same as the source type).\n     */\n    void assignTo( Mat& m, int type=-1 ) const;\n\n    /** @brief Sets all or some of the array elements to the specified value.\n    @param s Assigned scalar converted to the actual array type.\n    */\n    Mat& operator = (const Scalar& s);\n\n    /** @brief Sets all or some of the array elements to the specified value.\n\n    This is an advanced variant of the Mat::operator=(const Scalar& s) operator.\n    @param value Assigned scalar converted to the actual array type.\n    @param mask Operation mask of the same size as \*this.\n     */\n    Mat& setTo(InputArray value, InputArray mask=noArray());\n\n    /** @brief Changes the shape and/or the number of channels of a 2D matrix without copying the data.\n\n    The method makes a new matrix header for \*this elements. The new matrix may have a different size\n    and/or different number of channels. Any combination is possible if:\n    -   No extra elements are included into the new matrix and no elements are excluded. Consequently,\n        the product rows\*cols\*channels() must stay the same after the transformation.\n    -   No data is copied. That is, this is an O(1) operation. Consequently, if you change the number of\n        rows, or the operation changes the row indices of elements in some other way, the matrix must be\n        continuous. See Mat::isContinuous .\n\n    For example, if there is a set of 3D points stored as an STL vector, and you want to represent the\n    points as a 3xN matrix, do the following:\n    @code\n        std::vector<Point3f> vec;\n        ...\n        Mat pointMat = Mat(vec). // convert vector to Mat, O(1) operation\n                          reshape(1). // make Nx3 1-channel matrix out of Nx1 3-channel.\n                                      // Also, an O(1) operation\n                             t(); // finally, transpose the Nx3 matrix.\n                                  // This involves copying all the elements\n    @endcode\n    @param cn New number of channels. If the parameter is 0, the number of channels remains the same.\n    @param rows New number of rows. 
If the parameter is 0, the number of rows remains the same.\n     */\n    Mat reshape(int cn, int rows=0) const;\n\n    /** @overload */\n    Mat reshape(int cn, int newndims, const int* newsz) const;\n\n    /** @brief Transposes a matrix.\n\n    The method performs matrix transposition by means of matrix expressions. It does not perform the\n    actual transposition but returns a temporary matrix transposition object that can be further used as\n    a part of more complex matrix expressions or can be assigned to a matrix:\n    @code\n        Mat A1 = A + Mat::eye(A.size(), A.type())*lambda;\n        Mat C = A1.t()*A1; // compute (A + lambda*I)^t * (A + lambda*I)\n    @endcode\n     */\n    MatExpr t() const;\n\n    /** @brief Inverts a matrix.\n\n    The method performs a matrix inversion by means of matrix expressions. This means that a temporary\n    matrix inversion object is returned by the method and can be used further as a part of more complex\n    matrix expressions or can be assigned to a matrix.\n    @param method Matrix inversion method. One of cv::DecompTypes\n     */\n    MatExpr inv(int method=DECOMP_LU) const;\n\n    /** @brief Performs an element-wise multiplication or division of the two matrices.\n\n    The method returns a temporary object encoding per-element array multiplication, with optional\n    scale. Note that this is not matrix multiplication, which corresponds to the simpler \"\*\" operator.\n\n    Example:\n    @code\n        Mat C = A.mul(5/B); // equivalent to divide(A, B, C, 5)\n    @endcode\n    @param m Another array of the same type and the same size as \*this, or a matrix expression.\n    @param scale Optional scale factor.\n     */\n    MatExpr mul(InputArray m, double scale=1) const;\n\n    /** @brief Computes a cross-product of two 3-element vectors.\n\n    The method computes a cross-product of two 3-element vectors. The vectors must be 3-element\n    floating-point vectors of the same shape and size. The result is another 3-element vector of the\n    same shape and type as operands.\n    @param m Another cross-product operand.\n     */\n    Mat cross(InputArray m) const;\n\n    /** @brief Computes a dot-product of two vectors.\n\n    The method computes a dot-product of two matrices. If the matrices are not single-column or\n    single-row vectors, the top-to-bottom left-to-right scan ordering is used to treat them as 1D\n    vectors. The vectors must have the same size and type. If the matrices have more than one channel,\n    the dot products from all the channels are summed together.\n    @param m another dot-product operand.\n     */\n    double dot(InputArray m) const;\n\n    /** @brief Returns a zero array of the specified size and type.\n\n    The method returns a Matlab-style zero array initializer. It can be used to quickly form a constant\n    array as a function parameter, part of a matrix expression, or as a matrix initializer. 
:\n    @code\n        Mat A;\n        A = Mat::zeros(3, 3, CV_32F);\n    @endcode\n    In the example above, a new matrix is allocated only if A is not a 3x3 floating-point matrix.\n    Otherwise, the existing matrix A is filled with zeros.\n    @param rows Number of rows.\n    @param cols Number of columns.\n    @param type Created matrix type.\n     */\n    static MatExpr zeros(int rows, int cols, int type);\n\n    /** @overload\n    @param size Alternative to the matrix size specification Size(cols, rows) .\n    @param type Created matrix type.\n    */\n    static MatExpr zeros(Size size, int type);\n\n    /** @overload\n    @param ndims Array dimensionality.\n    @param sz Array of integers specifying the array shape.\n    @param type Created matrix type.\n    */\n    static MatExpr zeros(int ndims, const int* sz, int type);\n\n    /** @brief Returns an array of all 1's of the specified size and type.\n\n    The method returns a Matlab-style 1's array initializer, similarly to Mat::zeros. Note that using\n    this method you can initialize an array with an arbitrary value, using the following Matlab idiom:\n    @code\n        Mat A = Mat::ones(100, 100, CV_8U)*3; // make 100x100 matrix filled with 3.\n    @endcode\n    The above operation does not form a 100x100 matrix of 1's and then multiply it by 3. Instead, it\n    just remembers the scale factor (3 in this case) and use it when actually invoking the matrix\n    initializer.\n    @param rows Number of rows.\n    @param cols Number of columns.\n    @param type Created matrix type.\n     */\n    static MatExpr ones(int rows, int cols, int type);\n\n    /** @overload\n    @param size Alternative to the matrix size specification Size(cols, rows) .\n    @param type Created matrix type.\n    */\n    static MatExpr ones(Size size, int type);\n\n    /** @overload\n    @param ndims Array dimensionality.\n    @param sz Array of integers specifying the array shape.\n    @param type Created matrix type.\n    */\n    static MatExpr ones(int ndims, const int* sz, int type);\n\n    /** @brief Returns an identity matrix of the specified size and type.\n\n    The method returns a Matlab-style identity matrix initializer, similarly to Mat::zeros. Similarly to\n    Mat::ones, you can use a scale operation to create a scaled identity matrix efficiently:\n    @code\n        // make a 4x4 diagonal matrix with 0.1's on the diagonal.\n        Mat A = Mat::eye(4, 4, CV_32F)*0.1;\n    @endcode\n    @param rows Number of rows.\n    @param cols Number of columns.\n    @param type Created matrix type.\n     */\n    static MatExpr eye(int rows, int cols, int type);\n\n    /** @overload\n    @param size Alternative matrix size specification as Size(cols, rows) .\n    @param type Created matrix type.\n    */\n    static MatExpr eye(Size size, int type);\n\n    /** @brief Allocates new array data if needed.\n\n    This is one of the key Mat methods. Most new-style OpenCV functions and methods that produce arrays\n    call this method for each output array. The method uses the following algorithm:\n\n    -# If the current array shape and the type match the new ones, return immediately. 
Otherwise,\n       de-reference the previous data by calling Mat::release.\n    -# Initialize the new header.\n    -# Allocate the new data of total()\*elemSize() bytes.\n    -# Allocate the new reference counter associated with the data and set it to 1.\n\n    Such a scheme makes the memory management robust and efficient at the same time and helps avoid\n    extra typing for you. This means that usually there is no need to explicitly allocate output arrays.\n    That is, instead of writing:\n    @code\n        Mat color;\n        ...\n        Mat gray(color.rows, color.cols, color.depth());\n        cvtColor(color, gray, COLOR_BGR2GRAY);\n    @endcode\n    you can simply write:\n    @code\n        Mat color;\n        ...\n        Mat gray;\n        cvtColor(color, gray, COLOR_BGR2GRAY);\n    @endcode\n    because cvtColor, like most other OpenCV functions, calls Mat::create() for the output array\n    internally.\n    @param rows New number of rows.\n    @param cols New number of columns.\n    @param type New matrix type.\n     */\n    void create(int rows, int cols, int type);\n\n    /** @overload\n    @param size Alternative new matrix size specification: Size(cols, rows)\n    @param type New matrix type.\n    */\n    void create(Size size, int type);\n\n    /** @overload\n    @param ndims New array dimensionality.\n    @param sizes Array of integers specifying a new array shape.\n    @param type New matrix type.\n    */\n    void create(int ndims, const int* sizes, int type);\n\n    /** @brief Increments the reference counter.\n\n    The method increments the reference counter associated with the matrix data. If the matrix header\n    points to an external data set (see Mat::Mat ), the reference counter is NULL, and the method has no\n    effect in this case. Normally, to avoid memory leaks, the method should not be called explicitly. It\n    is called implicitly by the matrix assignment operator. The reference counter increment is an atomic\n    operation on the platforms that support it. Thus, it is safe to operate on the same matrices\n    asynchronously in different threads.\n     */\n    void addref();\n\n    /** @brief Decrements the reference counter and deallocates the matrix if needed.\n\n    The method decrements the reference counter associated with the matrix data. When the reference\n    counter reaches 0, the matrix data is deallocated and the data and the reference counter pointers\n    are set to NULL's. If the matrix header points to an external data set (see Mat::Mat ), the\n    reference counter is NULL, and the method has no effect in this case.\n\n    This method can be called manually to force the matrix data deallocation. But since this method is\n    automatically called in the destructor, or by any other method that changes the data pointer, it is\n    usually not needed. The reference counter decrement and check for 0 is an atomic operation on the\n    platforms that support it. Thus, it is safe to operate on the same matrices asynchronously in\n    different threads.\n     */\n    void release();\n\n    //! deallocates the matrix data\n    void deallocate();\n    //! internal use function; properly re-allocates _size, _step arrays\n    void copySize(const Mat& m);\n\n    /** @brief Reserves space for a certain number of rows.\n\n    The method reserves space for sz rows. If the matrix already has enough space to store sz rows,\n    nothing happens. If the matrix is reallocated, the first Mat::rows rows are preserved. 
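A small sketch of the intended use (the names and sizes are hypothetical):\n    @code\n        Mat samples(0, 3, CV_32F);        // empty matrix with 3 columns\n        samples.reserve(100);             // pre-allocate space for 100 rows\n        Mat row = Mat::ones(1, 3, CV_32F);\n        samples.push_back(row);           // appends a row without reallocation\n    @endcode\n    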
The method\n    emulates the corresponding method of the STL vector class.\n    @param sz Number of rows.\n     */\n    void reserve(size_t sz);\n\n    /** @brief Changes the number of matrix rows.\n\n    The methods change the number of matrix rows. If the matrix is reallocated, the first\n    min(Mat::rows, sz) rows are preserved. The methods emulate the corresponding methods of the STL\n    vector class.\n    @param sz New number of rows.\n     */\n    void resize(size_t sz);\n\n    /** @overload\n    @param sz New number of rows.\n    @param s Value assigned to the newly added elements.\n     */\n    void resize(size_t sz, const Scalar& s);\n\n    //! internal function\n    void push_back_(const void* elem);\n\n    /** @brief Adds elements to the bottom of the matrix.\n\n    The methods add one or more elements to the bottom of the matrix. They emulate the corresponding\n    method of the STL vector class. When elem is Mat , its type and the number of columns must be the\n    same as in the container matrix.\n    @param elem Added element(s).\n     */\n    template<typename _Tp> void push_back(const _Tp& elem);\n\n    /** @overload\n    @param elem Added element(s).\n    */\n    template<typename _Tp> void push_back(const Mat_<_Tp>& elem);\n\n    /** @overload\n    @param m Added line(s).\n    */\n    void push_back(const Mat& m);\n\n    /** @brief Removes elements from the bottom of the matrix.\n\n    The method removes one or more rows from the bottom of the matrix.\n    @param nelems Number of removed rows. If it is greater than the total number of rows, an exception\n    is thrown.\n     */\n    void pop_back(size_t nelems=1);\n\n    /** @brief Locates the matrix header within a parent matrix.\n\n    After you extract a submatrix from a matrix using Mat::row, Mat::col, Mat::rowRange,\n    Mat::colRange, and others, the resultant submatrix points just to the part of the original big\n    matrix. However, each submatrix contains information (represented by datastart and dataend\n    fields) that helps reconstruct the original matrix size and the position of the extracted\n    submatrix within the original matrix. The method locateROI does exactly that.\n    @param wholeSize Output parameter that contains the size of the whole matrix containing *this*\n    as a part.\n    @param ofs Output parameter that contains an offset of *this* inside the whole matrix.\n     */\n    void locateROI( Size& wholeSize, Point& ofs ) const;\n\n    /** @brief Adjusts a submatrix size and position within the parent matrix.\n\n    The method is complementary to Mat::locateROI . The typical use of these functions is to determine\n    the submatrix position within the parent matrix and then shift the position somehow. Typically, it\n    can be required for filtering operations when pixels outside of the ROI should be taken into\n    account. When all the method parameters are positive, the ROI needs to grow in all directions by the\n    specified amount, for example:\n    @code\n        A.adjustROI(2, 2, 2, 2);\n    @endcode\n    In this example, the matrix size is increased by 4 elements in each direction. The matrix is shifted\n    by 2 elements to the left and 2 elements up, which brings in all the necessary pixels for the\n    filtering with the 5x5 kernel.\n\n    adjustROI forces the adjusted ROI to be inside the parent matrix; that is, boundaries of the\n    adjusted ROI are constrained by boundaries of the parent matrix. 
For example, if the submatrix A is\n    located in the first row of a parent matrix and you call A.adjustROI(2, 2, 2, 2), then A will not\n    be increased in the upward direction.\n\n    The function is used internally by the OpenCV filtering functions, like filter2D , morphological\n    operations, and so on.\n    @param dtop Shift of the top submatrix boundary upwards.\n    @param dbottom Shift of the bottom submatrix boundary downwards.\n    @param dleft Shift of the left submatrix boundary to the left.\n    @param dright Shift of the right submatrix boundary to the right.\n    @sa copyMakeBorder\n     */\n    Mat& adjustROI( int dtop, int dbottom, int dleft, int dright );\n\n    /** @brief Extracts a rectangular submatrix.\n\n    The operators make a new header for the specified sub-array of \*this . They are the most\n    generalized forms of Mat::row, Mat::col, Mat::rowRange, and Mat::colRange . For example,\n    `A(Range(0, 10), Range::all())` is equivalent to `A.rowRange(0, 10)`. Similarly to all of the above,\n    the operators are O(1) operations, that is, no matrix data is copied.\n    @param rowRange Start and end row of the extracted submatrix. The upper boundary is not included. To\n    select all the rows, use Range::all().\n    @param colRange Start and end column of the extracted submatrix. The upper boundary is not included.\n    To select all the columns, use Range::all().\n     */\n    Mat operator()( Range rowRange, Range colRange ) const;\n\n    /** @overload\n    @param roi Extracted submatrix specified as a rectangle.\n    */\n    Mat operator()( const Rect& roi ) const;\n\n    /** @overload\n    @param ranges Array of selected ranges along each array dimension.\n    */\n    Mat operator()( const Range* ranges ) const;\n\n    // //! converts header to CvMat; no data is copied\n    // operator CvMat() const;\n    // //! converts header to CvMatND; no data is copied\n    // operator CvMatND() const;\n    // //! converts header to IplImage; no data is copied\n    // operator IplImage() const;\n\n    template<typename _Tp> operator std::vector<_Tp>() const;\n    template<typename _Tp, int n> operator Vec<_Tp, n>() const;\n    template<typename _Tp, int m, int n> operator Matx<_Tp, m, n>() const;\n\n    /** @brief Reports whether the matrix is continuous or not.\n\n    The method returns true if the matrix elements are stored continuously without gaps at the end of\n    each row. Otherwise, it returns false. Obviously, 1x1 or 1xN matrices are always continuous.\n    Matrices created with Mat::create are always continuous. But if you extract a part of the matrix\n    using Mat::col, Mat::diag, and so on, or construct a matrix header for externally allocated data,\n    such matrices may no longer have this property.\n\n    The continuity flag is stored as a bit in the Mat::flags field and is computed automatically when\n    you construct a matrix header. Thus, the continuity check is a very fast operation, though\n    theoretically it could be done as follows:\n    @code\n        // alternative implementation of Mat::isContinuous()\n        bool myCheckMatContinuity(const Mat& m)\n        {\n            //return (m.flags & Mat::CONTINUOUS_FLAG) != 0;\n            return m.rows == 1 || m.step == m.cols*m.elemSize();\n        }\n    @endcode\n    The method is used in quite a few OpenCV functions. 
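A quick sketch of when the flag is set (the names are hypothetical):\n    @code\n        Mat A(10, 10, CV_8U);\n        CV_Assert(A.isContinuous());  // freshly created matrices are continuous\n        Mat B = A.colRange(2, 5);\n        CV_Assert(!B.isContinuous()); // a column span leaves gaps at the end of each row\n    @endcode\n    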
The point is that element-wise operations\n    (such as arithmetic and logical operations, math functions, alpha blending, color space\n    transformations, and others) do not depend on the image geometry. Thus, if all the input and output\n    arrays are continuous, the functions can process them as very long single-row vectors. The example\n    below illustrates how an alpha-blending function can be implemented:\n    @code\n        template<typename T>\n        void alphaBlendRGBA(const Mat& src1, const Mat& src2, Mat& dst)\n        {\n            const float alpha_scale = (float)std::numeric_limits<T>::max(),\n                        inv_scale = 1.f/alpha_scale;\n\n            CV_Assert( src1.type() == src2.type() &&\n                       src1.type() == CV_MAKETYPE(DataType<T>::depth, 4) &&\n                       src1.size() == src2.size());\n            Size size = src1.size();\n            dst.create(size, src1.type());\n\n            // here is the idiom: check the arrays for continuity and,\n            // if this is the case,\n            // treat the arrays as 1D vectors\n            if( src1.isContinuous() && src2.isContinuous() && dst.isContinuous() )\n            {\n                size.width *= size.height;\n                size.height = 1;\n            }\n            size.width *= 4;\n\n            for( int i = 0; i < size.height; i++ )\n            {\n                // when the arrays are continuous,\n                // the outer loop is executed only once\n                const T* ptr1 = src1.ptr<T>(i);\n                const T* ptr2 = src2.ptr<T>(i);\n                T* dptr = dst.ptr<T>(i);\n\n                for( int j = 0; j < size.width; j += 4 )\n                {\n                    float alpha = ptr1[j+3]*inv_scale, beta = ptr2[j+3]*inv_scale;\n                    dptr[j] = saturate_cast<T>(ptr1[j]*alpha + ptr2[j]*beta);\n                    dptr[j+1] = saturate_cast<T>(ptr1[j+1]*alpha + ptr2[j+1]*beta);\n                    dptr[j+2] = saturate_cast<T>(ptr1[j+2]*alpha + ptr2[j+2]*beta);\n                    dptr[j+3] = saturate_cast<T>((1 - (1-alpha)*(1-beta))*alpha_scale);\n                }\n            }\n        }\n    @endcode\n    This approach, while being very simple, can boost the performance of a simple element-operation by\n    10-20 percent, especially if the image is rather small and the operation is quite simple.\n\n    Another OpenCV idiom in this function is the call of Mat::create for the destination array, which\n    allocates the destination array unless it already has the proper size and type. And while the newly\n    allocated arrays are always continuous, you still need to check the destination array because\n    Mat::create does not always allocate a new matrix.\n     */\n    bool isContinuous() const;\n\n    //! returns true if the matrix is a submatrix of another matrix\n    bool isSubmatrix() const;\n\n    /** @brief Returns the matrix element size in bytes.\n\n    The method returns the matrix element size in bytes. For example, if the matrix type is CV_16SC3 ,\n    the method returns 3\*sizeof(short) or 6.\n     */\n    size_t elemSize() const;\n\n    /** @brief Returns the size of each matrix element channel in bytes.\n\n    The method returns the matrix element channel size in bytes, that is, it ignores the number of\n    channels. 
For example, if the matrix type is CV_16SC3 , the method returns sizeof(short) or 2.\n     */\n    size_t elemSize1() const;\n\n    /** @brief Returns the type of a matrix element.\n\n    The method returns a matrix element type. This is an identifier compatible with the CvMat type\n    system, such as CV_16SC3, that is, a 16-bit signed 3-channel array, and so on.\n     */\n    int type() const;\n\n    /** @brief Returns the depth of a matrix element.\n\n    The method returns the identifier of the matrix element depth (the type of each individual channel).\n    For example, for a 16-bit signed element array, the method returns CV_16S . A complete list of\n    matrix types contains the following values:\n    -   CV_8U - 8-bit unsigned integers ( 0..255 )\n    -   CV_8S - 8-bit signed integers ( -128..127 )\n    -   CV_16U - 16-bit unsigned integers ( 0..65535 )\n    -   CV_16S - 16-bit signed integers ( -32768..32767 )\n    -   CV_32S - 32-bit signed integers ( -2147483648..2147483647 )\n    -   CV_32F - 32-bit floating-point numbers ( -FLT_MAX..FLT_MAX, INF, NAN )\n    -   CV_64F - 64-bit floating-point numbers ( -DBL_MAX..DBL_MAX, INF, NAN )\n     */\n    int depth() const;\n\n    /** @brief Returns the number of matrix channels.\n\n    The method returns the number of matrix channels.\n     */\n    int channels() const;\n\n    /** @brief Returns a normalized step.\n\n    The method returns a matrix step divided by Mat::elemSize1() . It can be useful to quickly access an\n    arbitrary matrix element.\n     */\n    size_t step1(int i=0) const;\n\n    /** @brief Returns true if the array has no elements.\n\n    The method returns true if Mat::total() is 0 or if Mat::data is NULL. Because of the pop_back() and\n    resize() methods, `M.total() == 0` does not imply that `M.data == NULL`.\n     */\n    bool empty() const;\n\n    /** @brief Returns the total number of array elements.\n\n    The method returns the number of array elements (a number of pixels if the array represents an\n    image).\n     */\n    size_t total() const;\n\n    //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise\n    int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const;\n\n    /** @brief Returns a pointer to the specified matrix row.\n\n    The methods return a `uchar*` or typed pointer to the specified matrix row. 
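For example, a tiny sketch (hypothetical names):\n    @code\n        Mat img(4, 4, CV_8UC1, Scalar(0));\n        uchar* p = img.ptr<uchar>(2); // pointer to the beginning of row 2\n        p[0] = 255;                   // modifies img directly\n    @endcode\n    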
See the sample in\n    Mat::isContinuous to know how to use these methods.\n    @param i0 A 0-based row index.\n     */\n    uchar* ptr(int i0=0);\n    /** @overload */\n    const uchar* ptr(int i0=0) const;\n\n    /** @overload */\n    uchar* ptr(int i0, int i1);\n    /** @overload */\n    const uchar* ptr(int i0, int i1) const;\n\n    /** @overload */\n    uchar* ptr(int i0, int i1, int i2);\n    /** @overload */\n    const uchar* ptr(int i0, int i1, int i2) const;\n\n    /** @overload */\n    uchar* ptr(const int* idx);\n    /** @overload */\n    const uchar* ptr(const int* idx) const;\n    /** @overload */\n    template<int n> uchar* ptr(const Vec<int, n>& idx);\n    /** @overload */\n    template<int n> const uchar* ptr(const Vec<int, n>& idx) const;\n\n    /** @overload */\n    template<typename _Tp> _Tp* ptr(int i0=0);\n    /** @overload */\n    template<typename _Tp> const _Tp* ptr(int i0=0) const;\n    /** @overload */\n    template<typename _Tp> _Tp* ptr(int i0, int i1);\n    /** @overload */\n    template<typename _Tp> const _Tp* ptr(int i0, int i1) const;\n    /** @overload */\n    template<typename _Tp> _Tp* ptr(int i0, int i1, int i2);\n    /** @overload */\n    template<typename _Tp> const _Tp* ptr(int i0, int i1, int i2) const;\n    /** @overload */\n    template<typename _Tp> _Tp* ptr(const int* idx);\n    /** @overload */\n    template<typename _Tp> const _Tp* ptr(const int* idx) const;\n    /** @overload */\n    template<typename _Tp, int n> _Tp* ptr(const Vec<int, n>& idx);\n    /** @overload */\n    template<typename _Tp, int n> const _Tp* ptr(const Vec<int, n>& idx) const;\n\n    /** @brief Returns a reference to the specified array element.\n\n    The template methods return a reference to the specified array element. For the sake of higher\n    performance, the index range checks are only performed in the Debug configuration.\n\n    Note that the variants with a single index (i) can be used to access elements of single-row or\n    single-column 2-dimensional arrays. 
That is, if, for example, A is a 1 x N floating-point matrix and\n    B is an M x 1 integer matrix, you can simply write `A.at<float>(k+4)` and `B.at<int>(2*i+1)`\n    instead of `A.at<float>(0,k+4)` and `B.at<int>(2*i+1,0)`, respectively.\n\n    The example below initializes a Hilbert matrix:\n    @code\n        Mat H(100, 100, CV_64F);\n        for(int i = 0; i < H.rows; i++)\n            for(int j = 0; j < H.cols; j++)\n                H.at<double>(i,j)=1./(i+j+1);\n    @endcode\n    @param i0 Index along the dimension 0\n     */\n    template<typename _Tp> _Tp& at(int i0=0);\n    /** @overload\n    @param i0 Index along the dimension 0\n    */\n    template<typename _Tp> const _Tp& at(int i0=0) const;\n    /** @overload\n    @param i0 Index along the dimension 0\n    @param i1 Index along the dimension 1\n    */\n    template<typename _Tp> _Tp& at(int i0, int i1);\n    /** @overload\n    @param i0 Index along the dimension 0\n    @param i1 Index along the dimension 1\n    */\n    template<typename _Tp> const _Tp& at(int i0, int i1) const;\n\n    /** @overload\n    @param i0 Index along the dimension 0\n    @param i1 Index along the dimension 1\n    @param i2 Index along the dimension 2\n    */\n    template<typename _Tp> _Tp& at(int i0, int i1, int i2);\n    /** @overload\n    @param i0 Index along the dimension 0\n    @param i1 Index along the dimension 1\n    @param i2 Index along the dimension 2\n    */\n    template<typename _Tp> const _Tp& at(int i0, int i1, int i2) const;\n\n    /** @overload\n    @param idx Array of Mat::dims indices.\n    */\n    template<typename _Tp> _Tp& at(const int* idx);\n    /** @overload\n    @param idx Array of Mat::dims indices.\n    */\n    template<typename _Tp> const _Tp& at(const int* idx) const;\n\n    /** @overload */\n    template<typename _Tp, int n> _Tp& at(const Vec<int, n>& idx);\n    /** @overload */\n    template<typename _Tp, int n> const _Tp& at(const Vec<int, n>& idx) const;\n\n    /** @overload\n    special versions for 2D arrays (especially convenient for referencing image pixels)\n    @param pt Element position specified as Point(j,i) .\n    */\n    template<typename _Tp> _Tp& at(Point pt);\n    /** @overload\n    special versions for 2D arrays (especially convenient for referencing image pixels)\n    @param pt Element position specified as Point(j,i) .\n    */\n    template<typename _Tp> const _Tp& at(Point pt) const;\n\n    /** @brief Returns the matrix iterator and sets it to the first matrix element.\n\n    The methods return the matrix read-only or read-write iterators. The use of matrix iterators is very\n    similar to the use of bi-directional STL iterators. 
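Since the matrix iterators are random-access, they can also be handed to STL algorithms; a small sketch (assuming \<algorithm\> is included):\n    @code\n        Mat m = (Mat_<float>(1, 5) << 3, 1, 4, 1, 5);\n        std::sort(m.begin<float>(), m.end<float>()); // m becomes [1, 1, 3, 4, 5]\n    @endcode\n    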
In the example below, the alpha blending\n    function is rewritten using the matrix iterators:\n    @code\n        template<typename T>\n        void alphaBlendRGBA(const Mat& src1, const Mat& src2, Mat& dst)\n        {\n            typedef Vec<T, 4> VT;\n\n            const float alpha_scale = (float)std::numeric_limits<T>::max(),\n                        inv_scale = 1.f/alpha_scale;\n\n            CV_Assert( src1.type() == src2.type() &&\n                       src1.type() == DataType<VT>::type &&\n                       src1.size() == src2.size());\n            Size size = src1.size();\n            dst.create(size, src1.type());\n\n            MatConstIterator_<VT> it1 = src1.begin<VT>(), it1_end = src1.end<VT>();\n            MatConstIterator_<VT> it2 = src2.begin<VT>();\n            MatIterator_<VT> dst_it = dst.begin<VT>();\n\n            for( ; it1 != it1_end; ++it1, ++it2, ++dst_it )\n            {\n                VT pix1 = *it1, pix2 = *it2;\n                float alpha = pix1[3]*inv_scale, beta = pix2[3]*inv_scale;\n                *dst_it = VT(saturate_cast<T>(pix1[0]*alpha + pix2[0]*beta),\n                             saturate_cast<T>(pix1[1]*alpha + pix2[1]*beta),\n                             saturate_cast<T>(pix1[2]*alpha + pix2[2]*beta),\n                             saturate_cast<T>((1 - (1-alpha)*(1-beta))*alpha_scale));\n            }\n        }\n    @endcode\n     */\n    template<typename _Tp> MatIterator_<_Tp> begin();\n    template<typename _Tp> MatConstIterator_<_Tp> begin() const;\n\n    /** @brief Returns the matrix iterator and sets it to the after-last matrix element.\n\n    The methods return the matrix read-only or read-write iterators, set to the point following the last\n    matrix element.\n     */\n    template<typename _Tp> MatIterator_<_Tp> end();\n    template<typename _Tp> MatConstIterator_<_Tp> end() const;\n\n    /** @brief Runs the given functor over all matrix elements.\n\n    The methods run the operation in parallel. The operation passed as an argument has to be a\n    function pointer, a function object, or a lambda (C++11).\n\n    All of the operations below are equivalent. Each puts 0xFF into the first channel of all matrix elements:\n    @code\n        Mat image(1920, 1080, CV_8UC3);\n        typedef cv::Point3_<uint8_t> Pixel;\n\n        // 1. Raw pointer access.\n        for (int r = 0; r < image.rows; ++r) {\n            Pixel* ptr = image.ptr<Pixel>(r, 0); // pointer to the first pixel of row r\n            const Pixel* ptr_end = ptr + image.cols;\n            for (; ptr != ptr_end; ++ptr) {\n                ptr->x = 255;\n            }\n        }\n\n        // 2. Using MatIterator (simple, but there is iterator overhead).\n        for (Pixel &p : cv::Mat_<Pixel>(image)) {\n            p.x = 255;\n        }\n\n        // 3. Parallel execution with a function object.\n        struct Operator {\n            void operator ()(Pixel &pixel, const int * position) {\n                pixel.x = 255;\n            }\n        };\n        image.forEach<Pixel>(Operator());\n\n        // 4. Parallel execution using a C++11 lambda.\n        image.forEach<Pixel>([](Pixel &p, const int * position) -> void {\n            p.x = 255;\n        });\n    @endcode\n    The position parameter is the index of the current pixel:\n    @code\n        // Create a 3D matrix (255 x 255 x 255) of type uint8_t\n        //  and initialize each element with a value equal to its position;\n        //  i.e. 
the pixel at (x,y,z) = (1,2,3) has (b,g,r) = (1,2,3).\n\n        int sizes[] = { 255, 255, 255 };\n        typedef cv::Point3_<uint8_t> Pixel;\n\n        Mat_<Pixel> image(Mat::zeros(3, sizes, CV_8UC3));\n\n        image.forEach([&](Pixel& pixel, const int position[]) -> void {\n            pixel.x = position[0];\n            pixel.y = position[1];\n            pixel.z = position[2];\n        });\n    @endcode\n     */\n    template<typename _Tp, typename Functor> void forEach(const Functor& operation);\n    /** @overload */\n    template<typename _Tp, typename Functor> void forEach(const Functor& operation) const;\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n    Mat(Mat&& m);\n    Mat& operator = (Mat&& m);\n#endif\n\n    enum { MAGIC_VAL  = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG };\n    enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 };\n\n    /*! includes several bit-fields:\n         - the magic signature\n         - continuity flag\n         - depth\n         - number of channels\n     */\n    int flags;\n    //! the matrix dimensionality, >= 2\n    int dims;\n    //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions\n    int rows, cols;\n    //! pointer to the data\n    uchar* data;\n\n    //! helper fields used in locateROI and adjustROI\n    const uchar* datastart;\n    const uchar* dataend;\n    const uchar* datalimit;\n\n    //! custom allocator\n    MatAllocator* allocator;\n    //! and the standard allocator\n    static MatAllocator* getStdAllocator();\n    static MatAllocator* getDefaultAllocator();\n    static void setDefaultAllocator(MatAllocator* allocator);\n\n    //! interaction with UMat\n    UMatData* u;\n\n    MatSize size;\n    MatStep step;\n\nprotected:\n    template<typename _Tp, typename Functor> void forEach_impl(const Functor& operation);\n};\n\n\n///////////////////////////////// Mat_<_Tp> ////////////////////////////////////\n\n/** @brief Template matrix class derived from Mat\n\n@code\n    template<typename _Tp> class Mat_ : public Mat\n    {\n    public:\n        // ... some specific methods\n        //         and\n        // no new extra fields\n    };\n@endcode\nThe class `Mat_<_Tp>` is a *thin* template wrapper on top of the Mat class. It does not have any\nextra data fields. Neither this class nor Mat has any virtual methods. Thus, references or pointers to\nthese two classes can be freely but carefully converted to one another. For example:\n@code\n    // create a 100x100 8-bit matrix\n    Mat M(100,100,CV_8U);\n    // this compiles fine; no data conversion is done.\n    Mat_<float>& M1 = (Mat_<float>&)M;\n    // the program is likely to crash at the statement below\n    M1(99,99) = 1.f;\n@endcode\nWhile Mat is sufficient in most cases, Mat_ can be more convenient if you use a lot of element\naccess operations and if you know the matrix type at compile time. 
Note that\n`Mat::at(int y,int x)` and `Mat_::operator()(int y,int x)` do absolutely the same thing\nand run at the same speed, but the latter is certainly shorter:\n@code\n    Mat_<double> M(20,20);\n    for(int i = 0; i < M.rows; i++)\n        for(int j = 0; j < M.cols; j++)\n            M(i,j) = 1./(i+j+1);\n    Mat E, V;\n    eigen(M,E,V);\n    cout << E.at<double>(0,0)/E.at<double>(M.rows-1,0);\n@endcode\nTo use Mat_ for multi-channel images/matrices, pass Vec as a Mat_ parameter:\n@code\n    // allocate a 320x240 color image and fill it with green (in RGB space)\n    Mat_<Vec3b> img(240, 320, Vec3b(0,255,0));\n    // now draw a diagonal white line\n    for(int i = 0; i < 100; i++)\n        img(i,i)=Vec3b(255,255,255);\n    // and now scramble the 2nd (red) channel of each pixel\n    for(int i = 0; i < img.rows; i++)\n        for(int j = 0; j < img.cols; j++)\n            img(i,j)[2] ^= (uchar)(i ^ j);\n@endcode\n */\ntemplate<typename _Tp> class Mat_ : public Mat\n{\npublic:\n    typedef _Tp value_type;\n    typedef typename DataType<_Tp>::channel_type channel_type;\n    typedef MatIterator_<_Tp> iterator;\n    typedef MatConstIterator_<_Tp> const_iterator;\n\n    //! default constructor\n    Mat_();\n    //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type)\n    Mat_(int _rows, int _cols);\n    //! constructor that sets each matrix element to specified value\n    Mat_(int _rows, int _cols, const _Tp& value);\n    //! equivalent to Mat(_size, DataType<_Tp>::type)\n    explicit Mat_(Size _size);\n    //! constructor that sets each matrix element to specified value\n    Mat_(Size _size, const _Tp& value);\n    //! n-dim array constructor\n    Mat_(int _ndims, const int* _sizes);\n    //! n-dim array constructor that sets each matrix element to specified value\n    Mat_(int _ndims, const int* _sizes, const _Tp& value);\n    //! copy/conversion constructor. If m is of different type, it's converted\n    Mat_(const Mat& m);\n    //! copy constructor\n    Mat_(const Mat_& m);\n    //! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type\n    Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP);\n    //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type\n    Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0);\n    //! selects a submatrix\n    Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all());\n    //! selects a submatrix\n    Mat_(const Mat_& m, const Rect& roi);\n    //! selects a submatrix, n-dim version\n    Mat_(const Mat_& m, const Range* ranges);\n    //! from a matrix expression\n    explicit Mat_(const MatExpr& e);\n    //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column\n    explicit Mat_(const std::vector<_Tp>& vec, bool copyData=false);\n    template<int n> explicit Mat_(const Vec<typename DataType<_Tp>::channel_type, n>& vec, bool copyData=true);\n    template<int m, int n> explicit Mat_(const Matx<typename DataType<_Tp>::channel_type, m, n>& mtx, bool copyData=true);\n    explicit Mat_(const Point_<typename DataType<_Tp>::channel_type>& pt, bool copyData=true);\n    explicit Mat_(const Point3_<typename DataType<_Tp>::channel_type>& pt, bool copyData=true);\n    explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer);\n\n    Mat_& operator = (const Mat& m);\n    Mat_& operator = (const Mat_& m);\n    //! 
set all the elements to s.\n    Mat_& operator = (const _Tp& s);\n    //! assign a matrix expression\n    Mat_& operator = (const MatExpr& e);\n\n    //! iterators; they are smart enough to skip gaps in the end of rows\n    iterator begin();\n    iterator end();\n    const_iterator begin() const;\n    const_iterator end() const;\n\n    //! template methods for operation over all matrix elements.\n    // the operations take care of skipping gaps in the end of rows (if any)\n    template<typename Functor> void forEach(const Functor& operation);\n    template<typename Functor> void forEach(const Functor& operation) const;\n\n    //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type)\n    void create(int _rows, int _cols);\n    //! equivalent to Mat::create(_size, DataType<_Tp>::type)\n    void create(Size _size);\n    //! equivalent to Mat::create(_ndims, _sizes, DataType<_Tp>::type)\n    void create(int _ndims, const int* _sizes);\n    //! cross-product\n    Mat_ cross(const Mat_& m) const;\n    //! data type conversion\n    template<typename T2> operator Mat_<T2>() const;\n    //! overridden forms of Mat::row() etc.\n    Mat_ row(int y) const;\n    Mat_ col(int x) const;\n    Mat_ diag(int d=0) const;\n    Mat_ clone() const;\n\n    //! overridden forms of Mat::elemSize() etc.\n    size_t elemSize() const;\n    size_t elemSize1() const;\n    int type() const;\n    int depth() const;\n    int channels() const;\n    size_t step1(int i=0) const;\n    //! returns step()/sizeof(_Tp)\n    size_t stepT(int i=0) const;\n\n    //! overridden forms of Mat::zeros() etc. Data type is omitted, of course\n    static MatExpr zeros(int rows, int cols);\n    static MatExpr zeros(Size size);\n    static MatExpr zeros(int _ndims, const int* _sizes);\n    static MatExpr ones(int rows, int cols);\n    static MatExpr ones(Size size);\n    static MatExpr ones(int _ndims, const int* _sizes);\n    static MatExpr eye(int rows, int cols);\n    static MatExpr eye(Size size);\n\n    //! some more overridden methods\n    Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright );\n    Mat_ operator()( const Range& rowRange, const Range& colRange ) const;\n    Mat_ operator()( const Rect& roi ) const;\n    Mat_ operator()( const Range* ranges ) const;\n\n    //! more convenient forms of row and element access operators\n    _Tp* operator [](int y);\n    const _Tp* operator [](int y) const;\n\n    //! returns reference to the specified element\n    _Tp& operator ()(const int* idx);\n    //! returns read-only reference to the specified element\n    const _Tp& operator ()(const int* idx) const;\n\n    //! returns reference to the specified element\n    template<int n> _Tp& operator ()(const Vec<int, n>& idx);\n    //! returns read-only reference to the specified element\n    template<int n> const _Tp& operator ()(const Vec<int, n>& idx) const;\n\n    //! returns reference to the specified element (1D case)\n    _Tp& operator ()(int idx0);\n    //! returns read-only reference to the specified element (1D case)\n    const _Tp& operator ()(int idx0) const;\n    //! returns reference to the specified element (2D case)\n    _Tp& operator ()(int idx0, int idx1);\n    //! returns read-only reference to the specified element (2D case)\n    const _Tp& operator ()(int idx0, int idx1) const;\n    //! returns reference to the specified element (3D case)\n    _Tp& operator ()(int idx0, int idx1, int idx2);\n    //! 
returns read-only reference to the specified element (3D case)\n    const _Tp& operator ()(int idx0, int idx1, int idx2) const;\n\n    _Tp& operator ()(Point pt);\n    const _Tp& operator ()(Point pt) const;\n\n    //! conversion to vector.\n    operator std::vector<_Tp>() const;\n    //! conversion to Vec\n    template<int n> operator Vec<typename DataType<_Tp>::channel_type, n>() const;\n    //! conversion to Matx\n    template<int m, int n> operator Matx<typename DataType<_Tp>::channel_type, m, n>() const;\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n    Mat_(Mat_&& m);\n    Mat_& operator = (Mat_&& m);\n\n    Mat_(Mat&& m);\n    Mat_& operator = (Mat&& m);\n\n    Mat_(MatExpr&& e);\n#endif\n};\n\ntypedef Mat_<uchar> Mat1b;\ntypedef Mat_<Vec2b> Mat2b;\ntypedef Mat_<Vec3b> Mat3b;\ntypedef Mat_<Vec4b> Mat4b;\n\ntypedef Mat_<short> Mat1s;\ntypedef Mat_<Vec2s> Mat2s;\ntypedef Mat_<Vec3s> Mat3s;\ntypedef Mat_<Vec4s> Mat4s;\n\ntypedef Mat_<ushort> Mat1w;\ntypedef Mat_<Vec2w> Mat2w;\ntypedef Mat_<Vec3w> Mat3w;\ntypedef Mat_<Vec4w> Mat4w;\n\ntypedef Mat_<int>   Mat1i;\ntypedef Mat_<Vec2i> Mat2i;\ntypedef Mat_<Vec3i> Mat3i;\ntypedef Mat_<Vec4i> Mat4i;\n\ntypedef Mat_<float> Mat1f;\ntypedef Mat_<Vec2f> Mat2f;\ntypedef Mat_<Vec3f> Mat3f;\ntypedef Mat_<Vec4f> Mat4f;\n\ntypedef Mat_<double> Mat1d;\ntypedef Mat_<Vec2d> Mat2d;\ntypedef Mat_<Vec3d> Mat3d;\ntypedef Mat_<Vec4d> Mat4d;\n\n/** @todo document */\nclass CV_EXPORTS UMat\n{\npublic:\n    //! default constructor\n    UMat(UMatUsageFlags usageFlags = USAGE_DEFAULT);\n    //! constructs 2D matrix of the specified size and type\n    // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)\n    UMat(int rows, int cols, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n    UMat(Size size, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n    //! constructs 2D matrix and fills it with the specified value _s.\n    UMat(int rows, int cols, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n    UMat(Size size, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n\n    //! constructs n-dimensional matrix\n    UMat(int ndims, const int* sizes, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n    UMat(int ndims, const int* sizes, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n\n    //! copy constructor\n    UMat(const UMat& m);\n\n    //! creates a matrix header for a part of the bigger matrix\n    UMat(const UMat& m, const Range& rowRange, const Range& colRange=Range::all());\n    UMat(const UMat& m, const Rect& roi);\n    UMat(const UMat& m, const Range* ranges);\n    //! builds matrix from std::vector with or without copying the data\n    template<typename _Tp> explicit UMat(const std::vector<_Tp>& vec, bool copyData=false);\n    //! builds matrix from cv::Vec; the data is copied by default\n    template<typename _Tp, int n> explicit UMat(const Vec<_Tp, n>& vec, bool copyData=true);\n    //! builds matrix from cv::Matx; the data is copied by default\n    template<typename _Tp, int m, int n> explicit UMat(const Matx<_Tp, m, n>& mtx, bool copyData=true);\n    //! builds matrix from a 2D point\n    template<typename _Tp> explicit UMat(const Point_<_Tp>& pt, bool copyData=true);\n    //! builds matrix from a 3D point\n    template<typename _Tp> explicit UMat(const Point3_<_Tp>& pt, bool copyData=true);\n    //! builds matrix from comma initializer\n    template<typename _Tp> explicit UMat(const MatCommaInitializer_<_Tp>& commaInitializer);\n
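\n    /* An illustrative sketch (an assumption, not from the original header): UMat\n       transparently keeps its data in device memory (e.g. OpenCL) when such a\n       backend is available; a typical round-trip to a CPU-side Mat looks like:\n\n           UMat um(480, 640, CV_8UC1, Scalar(0)); // device-side matrix\n           Mat m = um.getMat(ACCESS_READ);        // map the data for CPU reading\n    */\n\n    //! 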
destructor - calls release()\n    ~UMat();\n    //! assignment operators\n    UMat& operator = (const UMat& m);\n\n    Mat getMat(int flags) const;\n\n    //! returns a new matrix header for the specified row\n    UMat row(int y) const;\n    //! returns a new matrix header for the specified column\n    UMat col(int x) const;\n    //! ... for the specified row span\n    UMat rowRange(int startrow, int endrow) const;\n    UMat rowRange(const Range& r) const;\n    //! ... for the specified column span\n    UMat colRange(int startcol, int endcol) const;\n    UMat colRange(const Range& r) const;\n    //! ... for the specified diagonal\n    // (d=0 - the main diagonal,\n    //  >0 - a diagonal from the lower half,\n    //  <0 - a diagonal from the upper half)\n    UMat diag(int d=0) const;\n    //! constructs a square diagonal matrix whose main diagonal is the vector \"d\"\n    static UMat diag(const UMat& d);\n\n    //! returns deep copy of the matrix, i.e. the data is copied\n    UMat clone() const;\n    //! copies the matrix content to \"m\".\n    // It calls m.create(this->size(), this->type()).\n    void copyTo( OutputArray m ) const;\n    //! copies those matrix elements to \"m\" that are marked with non-zero mask elements.\n    void copyTo( OutputArray m, InputArray mask ) const;\n    //! converts matrix to another datatype with optional scaling. See cvConvertScale.\n    void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const;\n\n    void assignTo( UMat& m, int type=-1 ) const;\n\n    //! sets every matrix element to s\n    UMat& operator = (const Scalar& s);\n    //! sets some of the matrix elements to s, according to the mask\n    UMat& setTo(InputArray value, InputArray mask=noArray());\n    //! creates alternative matrix header for the same data, with different\n    // number of channels and/or different number of rows. see cvReshape.\n    UMat reshape(int cn, int rows=0) const;\n    UMat reshape(int cn, int newndims, const int* newsz) const;\n\n    //! matrix transposition by means of matrix expressions\n    UMat t() const;\n    //! matrix inversion by means of matrix expressions\n    UMat inv(int method=DECOMP_LU) const;\n    //! per-element matrix multiplication by means of matrix expressions\n    UMat mul(InputArray m, double scale=1) const;\n\n    //! computes dot-product\n    double dot(InputArray m) const;\n\n    //! Matlab-style matrix initialization\n    static UMat zeros(int rows, int cols, int type);\n    static UMat zeros(Size size, int type);\n    static UMat zeros(int ndims, const int* sz, int type);\n    static UMat ones(int rows, int cols, int type);\n    static UMat ones(Size size, int type);\n    static UMat ones(int ndims, const int* sz, int type);\n    static UMat eye(int rows, int cols, int type);\n    static UMat eye(Size size, int type);\n\n    //! allocates new matrix data unless the matrix already has specified size and type.\n    // previous data is unreferenced if needed.\n    void create(int rows, int cols, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n    void create(Size size, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n    void create(int ndims, const int* sizes, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT);\n\n    //! increases the reference counter; use with care to avoid memleaks\n    void addref();\n    //! decreases reference counter;\n    // deallocates the data when reference counter reaches 0.\n    void release();\n\n    //! deallocates the matrix data\n    void deallocate();\n    //! 
internal use function; properly re-allocates _size, _step arrays\n    void copySize(const UMat& m);\n\n    //! locates matrix header within a parent matrix. See below\n    void locateROI( Size& wholeSize, Point& ofs ) const;\n    //! moves/resizes the current matrix ROI inside the parent matrix.\n    UMat& adjustROI( int dtop, int dbottom, int dleft, int dright );\n    //! extracts a rectangular sub-matrix\n    // (this is a generalized form of row, rowRange etc.)\n    UMat operator()( Range rowRange, Range colRange ) const;\n    UMat operator()( const Rect& roi ) const;\n    UMat operator()( const Range* ranges ) const;\n\n    //! returns true iff the matrix data is continuous\n    // (i.e. when there are no gaps between successive rows).\n    // similar to CV_IS_MAT_CONT(cvmat->type)\n    bool isContinuous() const;\n\n    //! returns true if the matrix is a submatrix of another matrix\n    bool isSubmatrix() const;\n\n    //! returns element size in bytes,\n    // similar to CV_ELEM_SIZE(cvmat->type)\n    size_t elemSize() const;\n    //! returns the size of element channel in bytes.\n    size_t elemSize1() const;\n    //! returns element type, similar to CV_MAT_TYPE(cvmat->type)\n    int type() const;\n    //! returns element type, similar to CV_MAT_DEPTH(cvmat->type)\n    int depth() const;\n    //! returns element type, similar to CV_MAT_CN(cvmat->type)\n    int channels() const;\n    //! returns step/elemSize1()\n    size_t step1(int i=0) const;\n    //! returns true if matrix data is NULL\n    bool empty() const;\n    //! returns the total number of matrix elements\n    size_t total() const;\n\n    //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise\n    int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const;\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n    UMat(UMat&& m);\n    UMat& operator = (UMat&& m);\n#endif\n\n    void* handle(int accessFlags) const;\n    void ndoffset(size_t* ofs) const;\n\n    enum { MAGIC_VAL  = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG };\n    enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 };\n\n    /*! includes several bit-fields:\n         - the magic signature\n         - continuity flag\n         - depth\n         - number of channels\n     */\n    int flags;\n    //! the matrix dimensionality, >= 2\n    int dims;\n    //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions\n    int rows, cols;\n\n    //! custom allocator\n    MatAllocator* allocator;\n    UMatUsageFlags usageFlags; // usage flags for allocator\n    //! and the standard allocator\n    static MatAllocator* getStdAllocator();\n\n    // black-box container of UMat data\n    UMatData* u;\n\n    // offset of the submatrix (or 0)\n    size_t offset;\n\n    MatSize size;\n    MatStep step;\n\nprotected:\n};\n\n\n/////////////////////////// multi-dimensional sparse matrix //////////////////////////\n\n/** @brief The class SparseMat represents multi-dimensional sparse numerical arrays.\n\nSuch a sparse array can store elements of any type that Mat can store. *Sparse* means that only\nnon-zero elements are stored (though, as a result of operations on a sparse matrix, some of its\nstored elements can actually become 0. It is up to you to detect such elements and delete them\nusing SparseMat::erase ). 
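One possible way to do that (an illustrative sketch, assuming a CV_32F sparse matrix named sparse_mat):\n@code\n    // collect the indices of the elements that became 0 ...\n    std::vector<std::vector<int> > zeroIdx;\n    SparseMatIterator_<float> it = sparse_mat.begin<float>(), it_end = sparse_mat.end<float>();\n    for( ; it != it_end; ++it )\n        if( *it == 0.f )\n            zeroIdx.push_back(std::vector<int>(it.node()->idx, it.node()->idx + sparse_mat.dims()));\n    // ... and erase them after the loop, since erasing during iteration may invalidate the iterators\n    for( size_t i = 0; i < zeroIdx.size(); i++ )\n        sparse_mat.erase(&zeroIdx[i][0]);\n@endcode\n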
The non-zero elements are stored in a hash table that grows when it is\nfilled so that the search time is O(1) on average (regardless of whether the element is there or not).\nElements can be accessed using the following methods:\n-   Query operations (SparseMat::ptr and the higher-level SparseMat::ref, SparseMat::value and\n    SparseMat::find), for example:\n    @code\n        const int dims = 5;\n        int size[] = {10, 10, 10, 10, 10};\n        SparseMat sparse_mat(dims, size, CV_32F);\n        for(int i = 0; i < 1000; i++)\n        {\n            int idx[dims];\n            for(int k = 0; k < dims; k++)\n                idx[k] = rand() % size[k];\n            sparse_mat.ref<float>(idx) += 1.f;\n        }\n    @endcode\n-   Sparse matrix iterators. They are similar to MatIterator but different from NAryMatIterator.\n    That is, the iteration loop is familiar to STL users:\n    @code\n        // prints elements of a sparse floating-point matrix\n        // and the sum of elements.\n        SparseMatConstIterator_<float>\n            it = sparse_mat.begin<float>(),\n            it_end = sparse_mat.end<float>();\n        double s = 0;\n        int dims = sparse_mat.dims();\n        for(; it != it_end; ++it)\n        {\n            // print element indices and the element value\n            const SparseMat::Node* n = it.node();\n            printf(\"(\");\n            for(int i = 0; i < dims; i++)\n                printf(\"%d%s\", n->idx[i], i < dims-1 ? \", \" : \")\");\n            printf(\": %g\\n\", it.value<float>());\n            s += *it;\n        }\n        printf(\"Element sum is %g\\n\", s);\n    @endcode\n    If you run this loop, you will notice that elements are not enumerated in a logical order\n    (lexicographical, and so on). They come in the same order as they are stored in the hash table\n    (semi-randomly). You may collect pointers to the nodes and sort them to get the proper ordering.\n    Note, however, that pointers to the nodes may become invalid when you add more elements to the\n    matrix. This may happen due to possible buffer reallocation.\n-   Combination of the above 2 methods when you need to process 2 or more sparse matrices\n    simultaneously. 
For example, this is how you can compute unnormalized cross-correlation of the 2\n    floating-point sparse matrices:\n    @code\n        double cross_corr(const SparseMat& a, const SparseMat& b)\n        {\n            const SparseMat *_a = &a, *_b = &b;\n            // if b contains fewer elements than a,\n            // it is faster to iterate through b\n            if(_a->nzcount() > _b->nzcount())\n                std::swap(_a, _b);\n            SparseMatConstIterator_<float> it = _a->begin<float>(),\n                                           it_end = _a->end<float>();\n            double ccorr = 0;\n            for(; it != it_end; ++it)\n            {\n                // take the next element from the first matrix\n                float avalue = *it;\n                const SparseMat::Node* anode = it.node();\n                // and try to find an element with the same index in the second matrix.\n                // since the hash value depends only on the element index,\n                // reuse the hash value stored in the node\n                float bvalue = _b->value<float>(anode->idx,&anode->hashval);\n                ccorr += avalue*bvalue;\n            }\n            return ccorr;\n        }\n    @endcode\n */\nclass CV_EXPORTS SparseMat\n{\npublic:\n    typedef SparseMatIterator iterator;\n    typedef SparseMatConstIterator const_iterator;\n\n    enum { MAGIC_VAL=0x42FD0000, MAX_DIM=32, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 };\n\n    //! the sparse matrix header\n    struct CV_EXPORTS Hdr\n    {\n        Hdr(int _dims, const int* _sizes, int _type);\n        void clear();\n        int refcount;\n        int dims;\n        int valueOffset;\n        size_t nodeSize;\n        size_t nodeCount;\n        size_t freeList;\n        std::vector<uchar> pool;\n        std::vector<size_t> hashtab;\n        int size[MAX_DIM];\n    };\n\n    //! sparse matrix node - element of a hash table\n    struct CV_EXPORTS Node\n    {\n        //! hash value\n        size_t hashval;\n        //! index of the next node in the same hash table entry\n        size_t next;\n        //! index of the matrix element\n        int idx[MAX_DIM];\n    };\n\n    /** @brief Various SparseMat constructors.\n     */\n    SparseMat();\n\n    /** @overload\n    @param dims Array dimensionality.\n    @param _sizes Sparse matrix size on all dimensions.\n    @param _type Sparse matrix data type.\n    */\n    SparseMat(int dims, const int* _sizes, int _type);\n\n    /** @overload\n    @param m Source matrix for copy constructor.\n    */\n    SparseMat(const SparseMat& m);\n\n    /** @overload\n    @param m Source matrix for copy constructor. If m is a dense matrix (cv::Mat), it is converted\n    to sparse representation.\n    */\n    explicit SparseMat(const Mat& m);\n\n    //! the destructor\n    ~SparseMat();\n\n    //! assignment operator. This is O(1) operation, i.e. no data is copied\n    SparseMat& operator = (const SparseMat& m);\n    //! equivalent to the corresponding constructor\n    SparseMat& operator = (const Mat& m);\n\n    //! creates full copy of the matrix\n    SparseMat clone() const;\n\n    //! copies all the data to the destination matrix. All the previous content of m is erased\n    void copyTo( SparseMat& m ) const;\n    //! converts sparse matrix to dense matrix.\n    void copyTo( Mat& m ) const;\n    //! 
multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type\n    void convertTo( SparseMat& m, int rtype, double alpha=1 ) const;\n    //! converts sparse matrix to dense n-dim matrix with optional type conversion and scaling.\n    /*!\n        @param [out] m - output matrix; if it does not have a proper size or type before the operation,\n            it is reallocated\n        @param [in] rtype - desired output matrix type or, rather, the depth, since the number of channels\n            is the same as in the input; if rtype is negative, the output matrix will have the\n            same type as the input.\n        @param [in] alpha - optional scale factor\n        @param [in] beta - optional delta added to the scaled values\n    */\n    void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const;\n\n    // not used now\n    void assignTo( SparseMat& m, int type=-1 ) const;\n\n    //! reallocates sparse matrix.\n    /*!\n        If the matrix already had the proper size and type,\n        it is simply cleared with clear(), otherwise,\n        the old matrix is released (using release()) and the new one is allocated.\n    */\n    void create(int dims, const int* _sizes, int _type);\n    //! sets all the sparse matrix elements to 0, which means clearing the hash table.\n    void clear();\n    //! manually increments the reference counter to the header.\n    void addref();\n    //! decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated.\n    void release();\n\n    //! converts sparse matrix to the old-style representation; all the elements are copied.\n    //operator CvSparseMat*() const;\n    //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements)\n    size_t elemSize() const;\n    //! returns elemSize()/channels()\n    size_t elemSize1() const;\n\n    //! returns type of sparse matrix elements\n    int type() const;\n    //! returns the depth of sparse matrix elements\n    int depth() const;\n    //! returns the number of channels\n    int channels() const;\n\n    //! returns the array of sizes, or NULL if the matrix is not allocated\n    const int* size() const;\n    //! returns the size of i-th matrix dimension (or 0)\n    int size(int i) const;\n    //! returns the matrix dimensionality\n    int dims() const;\n    //! returns the number of non-zero elements (=the number of hash table nodes)\n    size_t nzcount() const;\n\n    //! computes the element hash value (1D case)\n    size_t hash(int i0) const;\n    //! computes the element hash value (2D case)\n    size_t hash(int i0, int i1) const;\n    //! computes the element hash value (3D case)\n    size_t hash(int i0, int i1, int i2) const;\n    //! computes the element hash value (nD case)\n    size_t hash(const int* idx) const;\n\n    //!@{\n    /*!\n     specialized variants for 1D, 2D, 3D cases and the generic one for the n-D case.\n     return pointer to the matrix element.\n      - if the element is there (it's non-zero), the pointer to it is returned\n      - if it's not there and createMissing=false, NULL pointer is returned\n      - if it's not there and createMissing=true, then the new element\n        is created and initialized with 0. Pointer to it is returned\n      - if the optional hashval pointer is not NULL, the element hash value is\n        not computed, but *hashval is taken instead.\n    */\n    //! 
returns pointer to the specified element (1D case)\n    uchar* ptr(int i0, bool createMissing, size_t* hashval=0);\n    //! returns pointer to the specified element (2D case)\n    uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0);\n    //! returns pointer to the specified element (3D case)\n    uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0);\n    //! returns pointer to the specified element (nD case)\n    uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0);\n    //!@}\n\n    //!@{\n    /*!\n     return read-write reference to the specified sparse matrix element.\n\n     `ref<_Tp>(i0,...[,hashval])` is equivalent to `*(_Tp*)ptr(i0,...,true[,hashval])`.\n     The methods always return a valid reference.\n     If the element did not exist, it is created and initialized with 0.\n    */\n    //! returns reference to the specified element (1D case)\n    template<typename _Tp> _Tp& ref(int i0, size_t* hashval=0);\n    //! returns reference to the specified element (2D case)\n    template<typename _Tp> _Tp& ref(int i0, int i1, size_t* hashval=0);\n    //! returns reference to the specified element (3D case)\n    template<typename _Tp> _Tp& ref(int i0, int i1, int i2, size_t* hashval=0);\n    //! returns reference to the specified element (nD case)\n    template<typename _Tp> _Tp& ref(const int* idx, size_t* hashval=0);\n    //!@}\n\n    //!@{\n    /*!\n     return value of the specified sparse matrix element.\n\n     `value<_Tp>(i0,...[,hashval])` is equivalent to\n     @code\n     { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); }\n     @endcode\n\n     That is, if the element did not exist, the methods return 0.\n     */\n    //! returns value of the specified element (1D case)\n    template<typename _Tp> _Tp value(int i0, size_t* hashval=0) const;\n    //! returns value of the specified element (2D case)\n    template<typename _Tp> _Tp value(int i0, int i1, size_t* hashval=0) const;\n    //! returns value of the specified element (3D case)\n    template<typename _Tp> _Tp value(int i0, int i1, int i2, size_t* hashval=0) const;\n    //! returns value of the specified element (nD case)\n    template<typename _Tp> _Tp value(const int* idx, size_t* hashval=0) const;\n    //!@}\n\n    //!@{\n    /*!\n     Return pointer to the specified sparse matrix element if it exists\n\n     `find<_Tp>(i0,...[,hashval])` is equivalent to `(const _Tp*)ptr(i0,...,false[,hashval])`.\n\n     If the specified element does not exist, the methods return NULL.\n    */\n    //! returns pointer to the specified element (1D case)\n    template<typename _Tp> const _Tp* find(int i0, size_t* hashval=0) const;\n    //! returns pointer to the specified element (2D case)\n    template<typename _Tp> const _Tp* find(int i0, int i1, size_t* hashval=0) const;\n    //! returns pointer to the specified element (3D case)\n    template<typename _Tp> const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const;\n    //! returns pointer to the specified element (nD case)\n    template<typename _Tp> const _Tp* find(const int* idx, size_t* hashval=0) const;\n    //!@}\n\n    //! erases the specified element (2D case)\n    void erase(int i0, int i1, size_t* hashval=0);\n    //! erases the specified element (3D case)\n    void erase(int i0, int i1, int i2, size_t* hashval=0);\n    //! 
erases the specified element (nD case)\n    void erase(const int* idx, size_t* hashval=0);\n\n    //!@{\n    /*!\n       return the sparse matrix iterator pointing to the first sparse matrix element\n    */\n    //! returns the sparse matrix iterator at the matrix beginning\n    SparseMatIterator begin();\n    //! returns the sparse matrix iterator at the matrix beginning\n    template<typename _Tp> SparseMatIterator_<_Tp> begin();\n    //! returns the read-only sparse matrix iterator at the matrix beginning\n    SparseMatConstIterator begin() const;\n    //! returns the read-only sparse matrix iterator at the matrix beginning\n    template<typename _Tp> SparseMatConstIterator_<_Tp> begin() const;\n    //!@}\n    /*!\n       return the sparse matrix iterator pointing to the element following the last sparse matrix element\n    */\n    //! returns the sparse matrix iterator at the matrix end\n    SparseMatIterator end();\n    //! returns the read-only sparse matrix iterator at the matrix end\n    SparseMatConstIterator end() const;\n    //! returns the typed sparse matrix iterator at the matrix end\n    template<typename _Tp> SparseMatIterator_<_Tp> end();\n    //! returns the typed read-only sparse matrix iterator at the matrix end\n    template<typename _Tp> SparseMatConstIterator_<_Tp> end() const;\n\n    //! returns the value stored in the sparse matrix node\n    template<typename _Tp> _Tp& value(Node* n);\n    //! returns the value stored in the sparse matrix node\n    template<typename _Tp> const _Tp& value(const Node* n) const;\n\n    ////////////// some internal-use methods ///////////////\n    Node* node(size_t nidx);\n    const Node* node(size_t nidx) const;\n\n    uchar* newNode(const int* idx, size_t hashval);\n    void removeNode(size_t hidx, size_t nidx, size_t previdx);\n    void resizeHashTab(size_t newsize);\n\n    int flags;\n    Hdr* hdr;\n};\n\n\n\n///////////////////////////////// SparseMat_<_Tp> ////////////////////////////////////\n\n/** @brief Template sparse n-dimensional array class derived from SparseMat\n\nSparseMat_ is a thin wrapper on top of SparseMat created in the same way as Mat_ . It simplifies\nnotation of some operations:\n@code\n    int sz[] = {10, 20, 30};\n    SparseMat_<double> M(3, sz);\n    ...\n    M.ref(1, 2, 3) = M(4, 5, 6) + M(7, 8, 9);\n@endcode\n */\ntemplate<typename _Tp> class SparseMat_ : public SparseMat\n{\npublic:\n    typedef SparseMatIterator_<_Tp> iterator;\n    typedef SparseMatConstIterator_<_Tp> const_iterator;\n\n    //! the default constructor\n    SparseMat_();\n    //! the full constructor equivalent to SparseMat(dims, _sizes, DataType<_Tp>::type)\n    SparseMat_(int dims, const int* _sizes);\n    //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted\n    SparseMat_(const SparseMat& m);\n    //! the copy constructor. This is O(1) operation - no data is copied\n    SparseMat_(const SparseMat_& m);\n    //! converts dense matrix to the sparse form\n    SparseMat_(const Mat& m);\n    //! converts the old-style sparse matrix to the C++ class. All the elements are copied\n    //SparseMat_(const CvSparseMat* m);\n    //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted\n    SparseMat_& operator = (const SparseMat& m);\n    //! the assignment operator. This is O(1) operation - no data is copied\n    SparseMat_& operator = (const SparseMat_& m);\n    //! converts dense matrix to the sparse form\n    SparseMat_& operator = (const Mat& m);\n\n    //! 
makes full copy of the matrix. All the elements are duplicated\n    SparseMat_ clone() const;\n    //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type)\n    void create(int dims, const int* _sizes);\n    //! converts sparse matrix to the old-style CvSparseMat. All the elements are copied\n    //operator CvSparseMat*() const;\n\n    //! returns type of the matrix elements\n    int type() const;\n    //! returns depth of the matrix elements\n    int depth() const;\n    //! returns the number of channels in each matrix element\n    int channels() const;\n\n    //! equivalent to SparseMat::ref<_Tp>(i0, hashval)\n    _Tp& ref(int i0, size_t* hashval=0);\n    //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval)\n    _Tp& ref(int i0, int i1, size_t* hashval=0);\n    //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval)\n    _Tp& ref(int i0, int i1, int i2, size_t* hashval=0);\n    //! equivalent to SparseMat::ref<_Tp>(idx, hashval)\n    _Tp& ref(const int* idx, size_t* hashval=0);\n\n    //! equivalent to SparseMat::value<_Tp>(i0, hashval)\n    _Tp operator()(int i0, size_t* hashval=0) const;\n    //! equivalent to SparseMat::value<_Tp>(i0, i1, hashval)\n    _Tp operator()(int i0, int i1, size_t* hashval=0) const;\n    //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval)\n    _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const;\n    //! equivalent to SparseMat::value<_Tp>(idx, hashval)\n    _Tp operator()(const int* idx, size_t* hashval=0) const;\n\n    //! returns sparse matrix iterator pointing to the first sparse matrix element\n    SparseMatIterator_<_Tp> begin();\n    //! returns read-only sparse matrix iterator pointing to the first sparse matrix element\n    SparseMatConstIterator_<_Tp> begin() const;\n    //! returns sparse matrix iterator pointing to the element following the last sparse matrix element\n    SparseMatIterator_<_Tp> end();\n    //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element\n    SparseMatConstIterator_<_Tp> end() const;\n};\n\n\n\n////////////////////////////////// MatConstIterator //////////////////////////////////\n\nclass CV_EXPORTS MatConstIterator\n{\npublic:\n    typedef uchar* value_type;\n    typedef ptrdiff_t difference_type;\n    typedef const uchar** pointer;\n    typedef uchar* reference;\n\n#ifndef OPENCV_NOSTL\n    typedef std::random_access_iterator_tag iterator_category;\n#endif\n\n    //! default constructor\n    MatConstIterator();\n    //! constructor that sets the iterator to the beginning of the matrix\n    MatConstIterator(const Mat* _m);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatConstIterator(const Mat* _m, int _row, int _col=0);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatConstIterator(const Mat* _m, Point _pt);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatConstIterator(const Mat* _m, const int* _idx);\n    //! copy constructor\n    MatConstIterator(const MatConstIterator& it);\n\n    //! copy operator\n    MatConstIterator& operator = (const MatConstIterator& it);\n    //! returns the current matrix element\n    const uchar* operator *() const;\n    //! returns the i-th matrix element, relative to the current\n    const uchar* operator [](ptrdiff_t i) const;\n\n    //! shifts the iterator forward by the specified number of elements\n    MatConstIterator& operator += (ptrdiff_t ofs);\n    //! 
shifts the iterator backward by the specified number of elements\n    MatConstIterator& operator -= (ptrdiff_t ofs);\n    //! decrements the iterator\n    MatConstIterator& operator --();\n    //! decrements the iterator\n    MatConstIterator operator --(int);\n    //! increments the iterator\n    MatConstIterator& operator ++();\n    //! increments the iterator\n    MatConstIterator operator ++(int);\n    //! returns the current iterator position\n    Point pos() const;\n    //! returns the current iterator position\n    void pos(int* _idx) const;\n\n    ptrdiff_t lpos() const;\n    void seek(ptrdiff_t ofs, bool relative = false);\n    void seek(const int* _idx, bool relative = false);\n\n    const Mat* m;\n    size_t elemSize;\n    const uchar* ptr;\n    const uchar* sliceStart;\n    const uchar* sliceEnd;\n};\n\n\n\n////////////////////////////////// MatConstIterator_ /////////////////////////////////\n\n/** @brief Matrix read-only iterator\n */\ntemplate<typename _Tp>\nclass MatConstIterator_ : public MatConstIterator\n{\npublic:\n    typedef _Tp value_type;\n    typedef ptrdiff_t difference_type;\n    typedef const _Tp* pointer;\n    typedef const _Tp& reference;\n\n#ifndef OPENCV_NOSTL\n    typedef std::random_access_iterator_tag iterator_category;\n#endif\n\n    //! default constructor\n    MatConstIterator_();\n    //! constructor that sets the iterator to the beginning of the matrix\n    MatConstIterator_(const Mat_<_Tp>* _m);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatConstIterator_(const Mat_<_Tp>* _m, Point _pt);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx);\n    //! copy constructor\n    MatConstIterator_(const MatConstIterator_& it);\n\n    //! copy operator\n    MatConstIterator_& operator = (const MatConstIterator_& it);\n    //! returns the current matrix element\n    _Tp operator *() const;\n    //! returns the i-th matrix element, relative to the current\n    _Tp operator [](ptrdiff_t i) const;\n\n    //! shifts the iterator forward by the specified number of elements\n    MatConstIterator_& operator += (ptrdiff_t ofs);\n    //! shifts the iterator backward by the specified number of elements\n    MatConstIterator_& operator -= (ptrdiff_t ofs);\n    //! decrements the iterator\n    MatConstIterator_& operator --();\n    //! decrements the iterator\n    MatConstIterator_ operator --(int);\n    //! increments the iterator\n    MatConstIterator_& operator ++();\n    //! increments the iterator\n    MatConstIterator_ operator ++(int);\n    //! returns the current iterator position\n    Point pos() const;\n};\n\n\n\n//////////////////////////////////// MatIterator_ ////////////////////////////////////\n\n/** @brief Matrix read-write iterator\n*/\ntemplate<typename _Tp>\nclass MatIterator_ : public MatConstIterator_<_Tp>\n{\npublic:\n    typedef _Tp* pointer;\n    typedef _Tp& reference;\n\n#ifndef OPENCV_NOSTL\n    typedef std::random_access_iterator_tag iterator_category;\n#endif\n\n    //! the default constructor\n    MatIterator_();\n    //! constructor that sets the iterator to the beginning of the matrix\n    MatIterator_(Mat_<_Tp>* _m);\n    //! 
constructor that sets the iterator to the specified element of the matrix\n    MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatIterator_(Mat_<_Tp>* _m, Point _pt);\n    //! constructor that sets the iterator to the specified element of the matrix\n    MatIterator_(Mat_<_Tp>* _m, const int* _idx);\n    //! copy constructor\n    MatIterator_(const MatIterator_& it);\n    //! copy operator\n    MatIterator_& operator = (const MatIterator_<_Tp>& it );\n\n    //! returns the current matrix element\n    _Tp& operator *() const;\n    //! returns the i-th matrix element, relative to the current\n    _Tp& operator [](ptrdiff_t i) const;\n\n    //! shifts the iterator forward by the specified number of elements\n    MatIterator_& operator += (ptrdiff_t ofs);\n    //! shifts the iterator backward by the specified number of elements\n    MatIterator_& operator -= (ptrdiff_t ofs);\n    //! decrements the iterator\n    MatIterator_& operator --();\n    //! decrements the iterator\n    MatIterator_ operator --(int);\n    //! increments the iterator\n    MatIterator_& operator ++();\n    //! increments the iterator\n    MatIterator_ operator ++(int);\n};\n\n\n\n/////////////////////////////// SparseMatConstIterator ///////////////////////////////\n\n/**  @brief Read-Only Sparse Matrix Iterator.\n\n Here is how to use the iterator to compute the sum of floating-point sparse matrix elements:\n\n \\code\n SparseMatConstIterator it = m.begin(), it_end = m.end();\n double s = 0;\n CV_Assert( m.type() == CV_32F );\n for( ; it != it_end; ++it )\n    s += it.value<float>();\n \\endcode\n*/\nclass CV_EXPORTS SparseMatConstIterator\n{\npublic:\n    //! the default constructor\n    SparseMatConstIterator();\n    //! the full constructor setting the iterator to the first sparse matrix element\n    SparseMatConstIterator(const SparseMat* _m);\n    //! the copy constructor\n    SparseMatConstIterator(const SparseMatConstIterator& it);\n\n    //! the assignment operator\n    SparseMatConstIterator& operator = (const SparseMatConstIterator& it);\n\n    //! template method returning the current matrix element\n    template<typename _Tp> const _Tp& value() const;\n    //! returns the current node of the sparse matrix. it.node->idx is the current element index\n    const SparseMat::Node* node() const;\n\n    //! moves iterator to the previous element\n    SparseMatConstIterator& operator --();\n    //! moves iterator to the previous element\n    SparseMatConstIterator operator --(int);\n    //! moves iterator to the next element\n    SparseMatConstIterator& operator ++();\n    //! moves iterator to the next element\n    SparseMatConstIterator operator ++(int);\n\n    //! moves iterator to the element after the last element\n    void seekEnd();\n\n    const SparseMat* m;\n    size_t hashidx;\n    uchar* ptr;\n};\n\n\n\n////////////////////////////////// SparseMatIterator /////////////////////////////////\n\n/** @brief  Read-write Sparse Matrix Iterator\n\n The class is similar to cv::SparseMatConstIterator,\n but can be used for in-place modification of the matrix elements.\n*/\nclass CV_EXPORTS SparseMatIterator : public SparseMatConstIterator\n{\npublic:\n    //! the default constructor\n    SparseMatIterator();\n    //! the full constructor setting the iterator to the first sparse matrix element\n    SparseMatIterator(SparseMat* _m);\n    //! 
the full constructor setting the iterator to the specified sparse matrix element\n    SparseMatIterator(SparseMat* _m, const int* idx);\n    //! the copy constructor\n    SparseMatIterator(const SparseMatIterator& it);\n\n    //! the assignment operator\n    SparseMatIterator& operator = (const SparseMatIterator& it);\n    //! returns read-write reference to the current sparse matrix element\n    template<typename _Tp> _Tp& value() const;\n    //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!)\n    SparseMat::Node* node() const;\n\n    //! moves iterator to the next element\n    SparseMatIterator& operator ++();\n    //! moves iterator to the next element\n    SparseMatIterator operator ++(int);\n};\n\n\n\n/////////////////////////////// SparseMatConstIterator_ //////////////////////////////\n\n/** @brief  Template Read-Only Sparse Matrix Iterator Class.\n\n This class, derived from SparseMatConstIterator, introduces a more\n convenient operator *() for accessing the current element.\n*/\ntemplate<typename _Tp> class SparseMatConstIterator_ : public SparseMatConstIterator\n{\npublic:\n\n#ifndef OPENCV_NOSTL\n    typedef std::forward_iterator_tag iterator_category;\n#endif\n\n    //! the default constructor\n    SparseMatConstIterator_();\n    //! the full constructor setting the iterator to the first sparse matrix element\n    SparseMatConstIterator_(const SparseMat_<_Tp>* _m);\n    SparseMatConstIterator_(const SparseMat* _m);\n    //! the copy constructor\n    SparseMatConstIterator_(const SparseMatConstIterator_& it);\n\n    //! the assignment operator\n    SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it);\n    //! the element access operator\n    const _Tp& operator *() const;\n\n    //! moves iterator to the next element\n    SparseMatConstIterator_& operator ++();\n    //! moves iterator to the next element\n    SparseMatConstIterator_ operator ++(int);\n};\n\n\n\n///////////////////////////////// SparseMatIterator_ /////////////////////////////////\n\n/** @brief  Template Read-Write Sparse Matrix Iterator Class.\n\n This class, derived from cv::SparseMatConstIterator_, introduces a more\n convenient operator *() for accessing the current element.\n*/\ntemplate<typename _Tp> class SparseMatIterator_ : public SparseMatConstIterator_<_Tp>\n{\npublic:\n\n#ifndef OPENCV_NOSTL\n    typedef std::forward_iterator_tag iterator_category;\n#endif\n\n    //! the default constructor\n    SparseMatIterator_();\n    //! the full constructor setting the iterator to the first sparse matrix element\n    SparseMatIterator_(SparseMat_<_Tp>* _m);\n    SparseMatIterator_(SparseMat* _m);\n    //! the copy constructor\n    SparseMatIterator_(const SparseMatIterator_& it);\n\n    //! the assignment operator\n    SparseMatIterator_& operator = (const SparseMatIterator_& it);\n    //! returns the reference to the current element\n    _Tp& operator *() const;\n\n    //! moves the iterator to the next element\n    SparseMatIterator_& operator ++();\n    //! moves the iterator to the next element\n    SparseMatIterator_ operator ++(int);\n};\n\n\n\n/////////////////////////////////// NAryMatIterator //////////////////////////////////\n\n/** @brief n-ary multi-dimensional array iterator.\n\nUse the class to implement unary, binary, and, generally, n-ary element-wise operations on\nmulti-dimensional arrays. Some of the arguments of an n-ary function may be continuous arrays, some\nmay not be. 
It is possible to use a conventional MatIterator for each array, but incrementing all of\nthe iterators after each small operation may be a big overhead. In this case consider using\nNAryMatIterator to iterate through several matrices simultaneously as long as they have the same\ngeometry (dimensionality and all the dimension sizes are the same). On each iteration `it.planes[0]`,\n`it.planes[1]`,... will be the slices of the corresponding matrices.\n\nThe example below illustrates how you can compute a normalized and thresholded 3D color histogram:\n@code\n    void computeNormalizedColorHist(const Mat& image, Mat& hist, int N, double minProb)\n    {\n        const int histSize[] = {N, N, N};\n\n        // make sure that the histogram has a proper size and type\n        hist.create(3, histSize, CV_32F);\n\n        // and clear it\n        hist = Scalar(0);\n\n        // the loop below assumes that the image\n        // is an 8-bit 3-channel image. check it.\n        CV_Assert(image.type() == CV_8UC3);\n        MatConstIterator_<Vec3b> it = image.begin<Vec3b>(),\n                                 it_end = image.end<Vec3b>();\n        for( ; it != it_end; ++it )\n        {\n            const Vec3b& pix = *it;\n            hist.at<float>(pix[0]*N/256, pix[1]*N/256, pix[2]*N/256) += 1.f;\n        }\n\n        minProb *= image.rows*image.cols;\n        Mat plane;\n        // use a separate name for the histogram iterator; reusing \"it\" would redefine it\n        NAryMatIterator hit(&hist, &plane, 1);\n        double s = 0;\n        // iterate through the matrix. on each iteration\n        // hit.planes[*] (of type Mat) will be set to the current plane.\n        for(int p = 0; p < hit.nplanes; p++, ++hit)\n        {\n            threshold(hit.planes[0], hit.planes[0], minProb, 0, THRESH_TOZERO);\n            s += sum(hit.planes[0])[0];\n        }\n\n        s = 1./s;\n        hit = NAryMatIterator(&hist, &plane, 1);\n        for(int p = 0; p < hit.nplanes; p++, ++hit)\n            hit.planes[0] *= s;\n    }\n@endcode\n */\nclass CV_EXPORTS NAryMatIterator\n{\npublic:\n    //! the default constructor\n    NAryMatIterator();\n    //! the full constructor taking arbitrary number of n-dim matrices\n    NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1);\n    //! the full constructor taking arbitrary number of n-dim matrices\n    NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1);\n    //! the separate iterator initialization method\n    void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1);\n\n    //! proceeds to the next plane of every iterated matrix\n    NAryMatIterator& operator ++();\n    //! proceeds to the next plane of every iterated matrix (postfix increment operator)\n    NAryMatIterator operator ++(int);\n\n    //! the iterated arrays\n    const Mat** arrays;\n    //! the current planes\n    Mat* planes;\n    //! data pointers\n    uchar** ptrs;\n    //! the number of arrays\n    int narrays;\n    //! the number of hyper-planes that the iterator steps through\n    size_t nplanes;\n    //! 
the size of each segment (in elements)\n    size_t size;\nprotected:\n    int iterdepth;\n    size_t idx;\n};\n\n\n\n///////////////////////////////// Matrix Expressions /////////////////////////////////\n\nclass CV_EXPORTS MatOp\n{\npublic:\n    MatOp();\n    virtual ~MatOp();\n\n    virtual bool elementWise(const MatExpr& expr) const;\n    virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0;\n    virtual void roi(const MatExpr& expr, const Range& rowRange,\n                     const Range& colRange, MatExpr& res) const;\n    virtual void diag(const MatExpr& expr, int d, MatExpr& res) const;\n    virtual void augAssignAdd(const MatExpr& expr, Mat& m) const;\n    virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const;\n    virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const;\n    virtual void augAssignDivide(const MatExpr& expr, Mat& m) const;\n    virtual void augAssignAnd(const MatExpr& expr, Mat& m) const;\n    virtual void augAssignOr(const MatExpr& expr, Mat& m) const;\n    virtual void augAssignXor(const MatExpr& expr, Mat& m) const;\n\n    virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;\n    virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const;\n\n    virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;\n    virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const;\n\n    virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const;\n    virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const;\n\n    virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const;\n    virtual void divide(double s, const MatExpr& expr, MatExpr& res) const;\n\n    virtual void abs(const MatExpr& expr, MatExpr& res) const;\n\n    virtual void transpose(const MatExpr& expr, MatExpr& res) const;\n    virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;\n    virtual void invert(const MatExpr& expr, int method, MatExpr& res) const;\n\n    virtual Size size(const MatExpr& expr) const;\n    virtual int type(const MatExpr& expr) const;\n};\n\n/** @brief Matrix expression representation\n@anchor MatrixExpressions\nThis is a list of implemented matrix operations that can be combined in arbitrary complex\nexpressions (here A, B stand for matrices ( Mat ), s for a scalar ( Scalar ), alpha for a\nreal-valued scalar ( double )):\n-   Addition, subtraction, negation: `A+B`, `A-B`, `A+s`, `A-s`, `s+A`, `s-A`, `-A`\n-   Scaling: `A*alpha`\n-   Per-element multiplication and division: `A.mul(B)`, `A/B`, `alpha/A`\n-   Matrix multiplication: `A*B`\n-   Transposition: `A.t()` (means A<sup>T</sup>)\n-   Matrix inversion and pseudo-inversion, solving linear systems and least-squares problems:\n    `A.inv([method]) (~ A<sup>-1</sup>)`,   `A.inv([method])*B (~ X: AX=B)`\n-   Comparison: `A cmpop B`, `A cmpop alpha`, `alpha cmpop A`, where *cmpop* is one of\n  `>`, `>=`, `==`, `!=`, `<=`, `<`. 
The result of comparison is an 8-bit single channel mask whose\n    elements are set to 255 (if the particular element or pair of elements satisfy the condition) or\n    0.\n-   Bitwise logical operations: `A logicop B`, `A logicop s`, `s logicop A`, `~A`, where *logicop* is one of\n  `&`, `|`, `^`.\n-   Element-wise minimum and maximum: `min(A, B)`, `min(A, alpha)`, `max(A, B)`, `max(A, alpha)`\n-   Element-wise absolute value: `abs(A)`\n-   Cross-product, dot-product: `A.cross(B)`, `A.dot(B)`\n-   Any function of matrix or matrices and scalars that returns a matrix or a scalar, such as norm,\n    mean, sum, countNonZero, trace, determinant, repeat, and others.\n-   Matrix initializers ( Mat::eye(), Mat::zeros(), Mat::ones() ), matrix comma-separated\n    initializers, matrix constructors and operators that extract sub-matrices (see Mat description).\n-   Mat_<destination_type>() constructors to cast the result to the proper type.\n@note Comma-separated initializers and probably some other operations may require additional\nexplicit Mat() or Mat_<T>() constructor calls to resolve a possible ambiguity.\n\nHere are examples of matrix expressions:\n@code\n    // compute pseudo-inverse of A, equivalent to A.inv(DECOMP_SVD)\n    SVD svd(A);\n    Mat pinvA = svd.vt.t()*Mat::diag(1./svd.w)*svd.u.t();\n\n    // compute the new vector of parameters in the Levenberg-Marquardt algorithm\n    x -= (A.t()*A + lambda*Mat::eye(A.cols,A.cols,A.type())).inv(DECOMP_CHOLESKY)*(A.t()*err);\n\n    // sharpen image using \"unsharp mask\" algorithm\n    Mat blurred; double sigma = 1, threshold = 5, amount = 1;\n    GaussianBlur(img, blurred, Size(), sigma, sigma);\n    Mat lowContrastMask = abs(img - blurred) < threshold;\n    Mat sharpened = img*(1+amount) + blurred*(-amount);\n    img.copyTo(sharpened, lowContrastMask);\n@endcode\n*/\nclass CV_EXPORTS MatExpr\n{\npublic:\n    MatExpr();\n    explicit MatExpr(const Mat& m);\n\n    MatExpr(const MatOp* _op, int _flags, const Mat& _a = Mat(), const Mat& _b = Mat(),\n            const Mat& _c = Mat(), double _alpha = 1, double _beta = 1, const Scalar& _s = Scalar());\n\n    operator Mat() const;\n    template<typename _Tp> operator Mat_<_Tp>() const;\n\n    Size size() const;\n    int type() const;\n\n    MatExpr row(int y) const;\n    MatExpr col(int x) const;\n    MatExpr diag(int d = 0) const;\n    MatExpr operator()( const Range& rowRange, const Range& colRange ) const;\n    MatExpr operator()( const Rect& roi ) const;\n\n    MatExpr t() const;\n    MatExpr inv(int method = DECOMP_LU) const;\n    MatExpr mul(const MatExpr& e, double scale=1) const;\n    MatExpr mul(const Mat& m, double scale=1) const;\n\n    Mat cross(const Mat& m) const;\n    double dot(const Mat& m) const;\n\n    const MatOp* op;\n    int flags;\n\n    Mat a, b, c;\n    double alpha, beta;\n    Scalar s;\n};\n\n//! @} core_basic\n\n//! @relates cv::MatExpr\n//! 
@{\nCV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s);\nCV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a);\nCV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m);\nCV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e);\nCV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s);\nCV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e);\nCV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2);\n\nCV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s);\nCV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a);\nCV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m);\nCV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e);\nCV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s);\nCV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e);\nCV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2);\n\nCV_EXPORTS MatExpr operator - (const Mat& m);\nCV_EXPORTS MatExpr operator - (const MatExpr& e);\n\nCV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator * (const Mat& a, double s);\nCV_EXPORTS MatExpr operator * (double s, const Mat& a);\nCV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m);\nCV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e);\nCV_EXPORTS MatExpr operator * (const MatExpr& e, double s);\nCV_EXPORTS MatExpr operator * (double s, const MatExpr& e);\nCV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2);\n\nCV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator / (const Mat& a, double s);\nCV_EXPORTS MatExpr operator / (double s, const Mat& a);\nCV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m);\nCV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e);\nCV_EXPORTS MatExpr operator / (const MatExpr& e, double s);\nCV_EXPORTS MatExpr operator / (double s, const MatExpr& e);\nCV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2);\n\nCV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator < (const Mat& a, double s);\nCV_EXPORTS MatExpr operator < (double s, const Mat& a);\n\nCV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator <= (const Mat& a, double s);\nCV_EXPORTS MatExpr operator <= (double s, const Mat& a);\n\nCV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator == (const Mat& a, double s);\nCV_EXPORTS MatExpr operator == (double s, const Mat& a);\n\nCV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator != (const Mat& a, double s);\nCV_EXPORTS MatExpr operator != (double s, const Mat& a);\n\nCV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator >= (const Mat& a, double s);\nCV_EXPORTS MatExpr operator >= (double s, const Mat& a);\n\nCV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator > (const Mat& a, double s);\nCV_EXPORTS MatExpr operator > (double s, const Mat& a);\n\nCV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s);\nCV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a);\n\nCV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& 
s);\nCV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a);\n\nCV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s);\nCV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a);\n\nCV_EXPORTS MatExpr operator ~(const Mat& m);\n\nCV_EXPORTS MatExpr min(const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr min(const Mat& a, double s);\nCV_EXPORTS MatExpr min(double s, const Mat& a);\n\nCV_EXPORTS MatExpr max(const Mat& a, const Mat& b);\nCV_EXPORTS MatExpr max(const Mat& a, double s);\nCV_EXPORTS MatExpr max(double s, const Mat& a);\n\n/** @brief Calculates the absolute value of each matrix element.\n\nabs is a meta-function that is expanded to one of absdiff or convertScaleAbs forms:\n- C = abs(A-B) is equivalent to `absdiff(A, B, C)`\n- C = abs(A) is equivalent to `absdiff(A, Scalar::all(0), C)`\n- C = `Mat_<Vec<uchar,n> >(abs(A*alpha + beta))` is equivalent to `convertScaleAbs(A, C, alpha,\nbeta)`\n\nThe output matrix has the same size and the same type as the input one except for the last case,\nwhere C has depth CV_8U.\n@param m matrix.\n@sa @ref MatrixExpressions, absdiff, convertScaleAbs\n */\nCV_EXPORTS MatExpr abs(const Mat& m);\n/** @overload\n@param e matrix expression.\n*/\nCV_EXPORTS MatExpr abs(const MatExpr& e);\n//! @} relates cv::MatExpr\n\n} // cv\n\n#include \"opencv2/core/mat.inl.hpp\"\n\n#endif // __OPENCV_CORE_MAT_HPP__\n"
  },
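  {
    "path": "docs/mat_usage_example.cpp",
    "content": "// mat_usage_example.cpp  (illustrative sketch only)\n//\n// This file is NOT part of the vendored OpenCV sources; it is added purely as\n// documentation for the two headers around it (mat.hpp and mat.inl.hpp).\n// The path and the build line are assumptions; any OpenCV 3.x installation\n// should compile it, e.g.:\n//   g++ mat_usage_example.cpp `pkg-config --cflags --libs opencv`\n//\n// It exercises three pieces of machinery declared in those headers:\n//   1. the lazy MatExpr operator overloads and the abs() meta-function,\n//   2. typed element access through Mat_<_Tp> and Mat::at<_Tp>(),\n//   3. the _InputArray proxy that lets a std::vector be passed wherever\n//      cv::InputArray is expected.\n\n#include <opencv2/core.hpp>\n#include <iostream>\n#include <vector>\n\nint main()\n{\n    // 1. Arithmetic on Mat builds a lazy MatExpr that is evaluated on\n    //    assignment; comparisons yield CV_8U masks (255 where true).\n    cv::Mat A = (cv::Mat_<float>(2, 2) << 1.f, -2.f, 3.f, -4.f);\n    cv::Mat B = cv::Mat::ones(2, 2, CV_32F);\n    cv::Mat sum  = 2.0 * A + B;     // operator*(double,Mat), operator+(MatExpr,Mat)\n    cv::Mat mask = A > 0;           // operator>(Mat,double)\n    cv::Mat C    = cv::abs(A - B);  // expands to absdiff(A, B, C), per the docs above\n\n    // 2. Mat_<float> verifies the element type (CV_DbgAssert in debug builds)\n    //    and then indexes without per-call casts; it shares A's buffer.\n    cv::Mat_<float> Af(A);\n    Af(0, 1) = 20.f;                // Mat_::operator()(int,int)\n    float v = A.at<float>(0, 1);    // Mat::at<_Tp> reads the same element\n\n    // 3. The templated _InputArray(const std::vector<_Tp>&) constructor wraps\n    //    the vector, so it can go straight into an InputArray parameter.\n    std::vector<cv::Point2f> pts(4, cv::Point2f(1.f, 2.f));\n    double n = cv::norm(pts);       // L2 norm over a 4x1, 2-channel array\n\n    std::cout << sum << std::endl << C << std::endl\n              << v << ' ' << cv::countNonZero(mask) << ' ' << n << std::endl;\n    return 0;\n}\n"
  },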
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/mat.inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__\n#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__\n\n#ifndef __cplusplus\n#  error mat.inl.hpp header must be compiled as C++\n#endif\n\nnamespace cv\n{\n\n//! 
@cond IGNORED\n\n//////////////////////// Input/Output Arrays ////////////////////////\n\ninline void _InputArray::init(int _flags, const void* _obj)\n{ flags = _flags; obj = (void*)_obj; }\n\ninline void _InputArray::init(int _flags, const void* _obj, Size _sz)\n{ flags = _flags; obj = (void*)_obj; sz = _sz; }\n\ninline void* _InputArray::getObj() const { return obj; }\ninline int _InputArray::getFlags() const { return flags; }\ninline Size _InputArray::getSz() const { return sz; }\n\ninline _InputArray::_InputArray() { init(NONE, 0); }\ninline _InputArray::_InputArray(int _flags, void* _obj) { init(_flags, _obj); }\ninline _InputArray::_InputArray(const Mat& m) { init(MAT+ACCESS_READ, &m); }\ninline _InputArray::_InputArray(const std::vector<Mat>& vec) { init(STD_VECTOR_MAT+ACCESS_READ, &vec); }\ninline _InputArray::_InputArray(const UMat& m) { init(UMAT+ACCESS_READ, &m); }\ninline _InputArray::_InputArray(const std::vector<UMat>& vec) { init(STD_VECTOR_UMAT+ACCESS_READ, &vec); }\n\ntemplate<typename _Tp> inline\n_InputArray::_InputArray(const std::vector<_Tp>& vec)\n{ init(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type + ACCESS_READ, &vec); }\n\ninline\n_InputArray::_InputArray(const std::vector<bool>& vec)\n{ init(FIXED_TYPE + STD_BOOL_VECTOR + DataType<bool>::type + ACCESS_READ, &vec); }\n\ntemplate<typename _Tp> inline\n_InputArray::_InputArray(const std::vector<std::vector<_Tp> >& vec)\n{ init(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_READ, &vec); }\n\ntemplate<typename _Tp> inline\n_InputArray::_InputArray(const std::vector<Mat_<_Tp> >& vec)\n{ init(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_READ, &vec); }\n\ntemplate<typename _Tp, int m, int n> inline\n_InputArray::_InputArray(const Matx<_Tp, m, n>& mtx)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_READ, &mtx, Size(n, m)); }\n\ntemplate<typename _Tp> inline\n_InputArray::_InputArray(const _Tp* vec, int n)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_READ, vec, Size(n, 1)); }\n\ntemplate<typename _Tp> inline\n_InputArray::_InputArray(const Mat_<_Tp>& m)\n{ init(FIXED_TYPE + MAT + DataType<_Tp>::type + ACCESS_READ, &m); }\n\ninline _InputArray::_InputArray(const double& val)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F + ACCESS_READ, &val, Size(1,1)); }\n\ninline _InputArray::_InputArray(const MatExpr& expr)\n{ init(FIXED_TYPE + FIXED_SIZE + EXPR + ACCESS_READ, &expr); }\n\ninline _InputArray::_InputArray(const cuda::GpuMat& d_mat)\n{ init(CUDA_GPU_MAT + ACCESS_READ, &d_mat); }\n\ninline _InputArray::_InputArray(const std::vector<cuda::GpuMat>& d_mat)\n{\tinit(STD_VECTOR_CUDA_GPU_MAT + ACCESS_READ, &d_mat);}\n\ninline _InputArray::_InputArray(const ogl::Buffer& buf)\n{ init(OPENGL_BUFFER + ACCESS_READ, &buf); }\n\ninline _InputArray::_InputArray(const cuda::HostMem& cuda_mem)\n{ init(CUDA_HOST_MEM + ACCESS_READ, &cuda_mem); }\n\ninline _InputArray::~_InputArray() {}\n\ninline Mat _InputArray::getMat(int i) const\n{\n    if( kind() == MAT && i < 0 )\n        return *(const Mat*)obj;\n    return getMat_(i);\n}\n\ninline bool _InputArray::isMat() const { return kind() == _InputArray::MAT; }\ninline bool _InputArray::isUMat() const  { return kind() == _InputArray::UMAT; }\ninline bool _InputArray::isMatVector() const { return kind() == _InputArray::STD_VECTOR_MAT; }\ninline bool _InputArray::isUMatVector() const  { return kind() == _InputArray::STD_VECTOR_UMAT; }\ninline bool _InputArray::isMatx() const { return kind() == _InputArray::MATX; }\ninline 
bool _InputArray::isVector() const { return kind() == _InputArray::STD_VECTOR || kind() == _InputArray::STD_BOOL_VECTOR; }\ninline bool _InputArray::isGpuMatVector() const { return kind() == _InputArray::STD_VECTOR_CUDA_GPU_MAT; }\n\n////////////////////////////////////////////////////////////////////////////////////////\n\ninline _OutputArray::_OutputArray() { init(ACCESS_WRITE, 0); }\ninline _OutputArray::_OutputArray(int _flags, void* _obj) { init(_flags|ACCESS_WRITE, _obj); }\ninline _OutputArray::_OutputArray(Mat& m) { init(MAT+ACCESS_WRITE, &m); }\ninline _OutputArray::_OutputArray(std::vector<Mat>& vec) { init(STD_VECTOR_MAT+ACCESS_WRITE, &vec); }\ninline _OutputArray::_OutputArray(UMat& m) { init(UMAT+ACCESS_WRITE, &m); }\ninline _OutputArray::_OutputArray(std::vector<UMat>& vec) { init(STD_VECTOR_UMAT+ACCESS_WRITE, &vec); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(std::vector<_Tp>& vec)\n{ init(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }\n\ninline\n_OutputArray::_OutputArray(std::vector<bool>&)\n{ CV_Error(Error::StsUnsupportedFormat, \"std::vector<bool> cannot be an output array\\n\"); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(std::vector<std::vector<_Tp> >& vec)\n{ init(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(std::vector<Mat_<_Tp> >& vec)\n{ init(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_WRITE, &vec); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(Mat_<_Tp>& m)\n{ init(FIXED_TYPE + MAT + DataType<_Tp>::type + ACCESS_WRITE, &m); }\n\ntemplate<typename _Tp, int m, int n> inline\n_OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, &mtx, Size(n, m)); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(_Tp* vec, int n)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, vec, Size(n, 1)); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(const std::vector<_Tp>& vec)\n{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(const std::vector<std::vector<_Tp> >& vec)\n{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_WRITE, &vec); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(const std::vector<Mat_<_Tp> >& vec)\n{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_WRITE, &vec); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(const Mat_<_Tp>& m)\n{ init(FIXED_TYPE + FIXED_SIZE + MAT + DataType<_Tp>::type + ACCESS_WRITE, &m); }\n\ntemplate<typename _Tp, int m, int n> inline\n_OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, &mtx, Size(n, m)); }\n\ntemplate<typename _Tp> inline\n_OutputArray::_OutputArray(const _Tp* vec, int n)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_WRITE, vec, Size(n, 1)); }\n\ninline _OutputArray::_OutputArray(cuda::GpuMat& d_mat)\n{ init(CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); }\n\ninline _OutputArray::_OutputArray(std::vector<cuda::GpuMat>& d_mat)\n{\tinit(STD_VECTOR_CUDA_GPU_MAT + ACCESS_WRITE, &d_mat);}\n\ninline _OutputArray::_OutputArray(ogl::Buffer& buf)\n{ init(OPENGL_BUFFER + ACCESS_WRITE, &buf); }\n\ninline _OutputArray::_OutputArray(cuda::HostMem& 
cuda_mem)\n{ init(CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); }\n\ninline _OutputArray::_OutputArray(const Mat& m)\n{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_WRITE, &m); }\n\ninline _OutputArray::_OutputArray(const std::vector<Mat>& vec)\n{ init(FIXED_SIZE + STD_VECTOR_MAT + ACCESS_WRITE, &vec); }\n\ninline _OutputArray::_OutputArray(const UMat& m)\n{ init(FIXED_TYPE + FIXED_SIZE + UMAT + ACCESS_WRITE, &m); }\n\ninline _OutputArray::_OutputArray(const std::vector<UMat>& vec)\n{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_WRITE, &vec); }\n\ninline _OutputArray::_OutputArray(const cuda::GpuMat& d_mat)\n{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); }\n\n\ninline _OutputArray::_OutputArray(const ogl::Buffer& buf)\n{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_WRITE, &buf); }\n\ninline _OutputArray::_OutputArray(const cuda::HostMem& cuda_mem)\n{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); }\n\n///////////////////////////////////////////////////////////////////////////////////////////\n\ninline _InputOutputArray::_InputOutputArray() { init(ACCESS_RW, 0); }\ninline _InputOutputArray::_InputOutputArray(int _flags, void* _obj) { init(_flags|ACCESS_RW, _obj); }\ninline _InputOutputArray::_InputOutputArray(Mat& m) { init(MAT+ACCESS_RW, &m); }\ninline _InputOutputArray::_InputOutputArray(std::vector<Mat>& vec) { init(STD_VECTOR_MAT+ACCESS_RW, &vec); }\ninline _InputOutputArray::_InputOutputArray(UMat& m) { init(UMAT+ACCESS_RW, &m); }\ninline _InputOutputArray::_InputOutputArray(std::vector<UMat>& vec) { init(STD_VECTOR_UMAT+ACCESS_RW, &vec); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(std::vector<_Tp>& vec)\n{ init(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }\n\ninline _InputOutputArray::_InputOutputArray(std::vector<bool>&)\n{ CV_Error(Error::StsUnsupportedFormat, \"std::vector<bool> cannot be an input/output array\\n\"); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(std::vector<std::vector<_Tp> >& vec)\n{ init(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(std::vector<Mat_<_Tp> >& vec)\n{ init(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_RW, &vec); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(Mat_<_Tp>& m)\n{ init(FIXED_TYPE + MAT + DataType<_Tp>::type + ACCESS_RW, &m); }\n\ntemplate<typename _Tp, int m, int n> inline\n_InputOutputArray::_InputOutputArray(Matx<_Tp, m, n>& mtx)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, &mtx, Size(n, m)); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(_Tp* vec, int n)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, vec, Size(n, 1)); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(const std::vector<_Tp>& vec)\n{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(const std::vector<std::vector<_Tp> >& vec)\n{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_VECTOR + DataType<_Tp>::type + ACCESS_RW, &vec); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(const std::vector<Mat_<_Tp> >& vec)\n{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_MAT + DataType<_Tp>::type + ACCESS_RW, &vec); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(const 
Mat_<_Tp>& m)\n{ init(FIXED_TYPE + FIXED_SIZE + MAT + DataType<_Tp>::type + ACCESS_RW, &m); }\n\ntemplate<typename _Tp, int m, int n> inline\n_InputOutputArray::_InputOutputArray(const Matx<_Tp, m, n>& mtx)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, &mtx, Size(n, m)); }\n\ntemplate<typename _Tp> inline\n_InputOutputArray::_InputOutputArray(const _Tp* vec, int n)\n{ init(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type + ACCESS_RW, vec, Size(n, 1)); }\n\ninline _InputOutputArray::_InputOutputArray(cuda::GpuMat& d_mat)\n{ init(CUDA_GPU_MAT + ACCESS_RW, &d_mat); }\n\ninline _InputOutputArray::_InputOutputArray(ogl::Buffer& buf)\n{ init(OPENGL_BUFFER + ACCESS_RW, &buf); }\n\ninline _InputOutputArray::_InputOutputArray(cuda::HostMem& cuda_mem)\n{ init(CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); }\n\ninline _InputOutputArray::_InputOutputArray(const Mat& m)\n{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_RW, &m); }\n\ninline _InputOutputArray::_InputOutputArray(const std::vector<Mat>& vec)\n{ init(FIXED_SIZE + STD_VECTOR_MAT + ACCESS_RW, &vec); }\n\ninline _InputOutputArray::_InputOutputArray(const UMat& m)\n{ init(FIXED_TYPE + FIXED_SIZE + UMAT + ACCESS_RW, &m); }\n\ninline _InputOutputArray::_InputOutputArray(const std::vector<UMat>& vec)\n{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_RW, &vec); }\n\ninline _InputOutputArray::_InputOutputArray(const cuda::GpuMat& d_mat)\n{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_RW, &d_mat); }\ninline _InputOutputArray::_InputOutputArray(const std::vector<cuda::GpuMat>& d_mat)\n{\tinit(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_CUDA_GPU_MAT + ACCESS_RW, &d_mat);}\n\ninline _InputOutputArray::_InputOutputArray(const ogl::Buffer& buf)\n{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_RW, &buf); }\n\ninline _InputOutputArray::_InputOutputArray(const cuda::HostMem& cuda_mem)\n{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); }\n\n//////////////////////////////////////////// Mat //////////////////////////////////////////\n\ninline\nMat::Mat()\n    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),\n      datalimit(0), allocator(0), u(0), size(&rows)\n{}\n\ninline\nMat::Mat(int _rows, int _cols, int _type)\n    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),\n      datalimit(0), allocator(0), u(0), size(&rows)\n{\n    create(_rows, _cols, _type);\n}\n\ninline\nMat::Mat(int _rows, int _cols, int _type, const Scalar& _s)\n    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),\n      datalimit(0), allocator(0), u(0), size(&rows)\n{\n    create(_rows, _cols, _type);\n    *this = _s;\n}\n\ninline\nMat::Mat(Size _sz, int _type)\n    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),\n      datalimit(0), allocator(0), u(0), size(&rows)\n{\n    create( _sz.height, _sz.width, _type );\n}\n\ninline\nMat::Mat(Size _sz, int _type, const Scalar& _s)\n    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),\n      datalimit(0), allocator(0), u(0), size(&rows)\n{\n    create(_sz.height, _sz.width, _type);\n    *this = _s;\n}\n\ninline\nMat::Mat(int _dims, const int* _sz, int _type)\n    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),\n      datalimit(0), allocator(0), u(0), size(&rows)\n{\n    create(_dims, _sz, _type);\n}\n\ninline\nMat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s)\n    : flags(MAGIC_VAL), dims(0), 
rows(0), cols(0), data(0), datastart(0), dataend(0),\n      datalimit(0), allocator(0), u(0), size(&rows)\n{\n    create(_dims, _sz, _type);\n    *this = _s;\n}\n\ninline\nMat::Mat(const Mat& m)\n    : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),\n      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),\n      u(m.u), size(&rows)\n{\n    if( u )\n        CV_XADD(&u->refcount, 1);\n    if( m.dims <= 2 )\n    {\n        step[0] = m.step[0]; step[1] = m.step[1];\n    }\n    else\n    {\n        dims = 0;\n        copySize(m);\n    }\n}\n\ninline\nMat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step)\n    : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols),\n      data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),\n      allocator(0), u(0), size(&rows)\n{\n    CV_Assert(total() == 0 || data != NULL);\n\n    size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);\n    size_t minstep = cols * esz;\n    if( _step == AUTO_STEP )\n    {\n        _step = minstep;\n        flags |= CONTINUOUS_FLAG;\n    }\n    else\n    {\n        if( rows == 1 ) _step = minstep;\n        CV_DbgAssert( _step >= minstep );\n\n        if (_step % esz1 != 0)\n        {\n            CV_Error(Error::BadStep, \"Step must be a multiple of esz1\");\n        }\n\n        flags |= _step == minstep ? CONTINUOUS_FLAG : 0;\n    }\n    step[0] = _step;\n    step[1] = esz;\n    datalimit = datastart + _step * rows;\n    dataend = datalimit - _step + minstep;\n}\n\ninline\nMat::Mat(Size _sz, int _type, void* _data, size_t _step)\n    : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width),\n      data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0),\n      allocator(0), u(0), size(&rows)\n{\n    CV_Assert(total() == 0 || data != NULL);\n\n    size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);\n    size_t minstep = cols*esz;\n    if( _step == AUTO_STEP )\n    {\n        _step = minstep;\n        flags |= CONTINUOUS_FLAG;\n    }\n    else\n    {\n        if( rows == 1 ) _step = minstep;\n        CV_DbgAssert( _step >= minstep );\n\n        if (_step % esz1 != 0)\n        {\n            CV_Error(Error::BadStep, \"Step must be a multiple of esz1\");\n        }\n\n        flags |= _step == minstep ? 
CONTINUOUS_FLAG : 0;\n    }\n    step[0] = _step;\n    step[1] = esz;\n    datalimit = datastart + _step*rows;\n    dataend = datalimit - _step + minstep;\n}\n\ntemplate<typename _Tp> inline\nMat::Mat(const std::vector<_Tp>& vec, bool copyData)\n    : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),\n      cols(1), data(0), datastart(0), dataend(0), allocator(0), u(0), size(&rows)\n{\n    if(vec.empty())\n        return;\n    if( !copyData )\n    {\n        step[0] = step[1] = sizeof(_Tp);\n        datastart = data = (uchar*)&vec[0];\n        datalimit = dataend = datastart + rows * step[0];\n    }\n    else\n        Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this);\n}\n\ntemplate<typename _Tp, int n> inline\nMat::Mat(const Vec<_Tp, n>& vec, bool copyData)\n    : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(n), cols(1), data(0),\n      datastart(0), dataend(0), allocator(0), u(0), size(&rows)\n{\n    if( !copyData )\n    {\n        step[0] = step[1] = sizeof(_Tp);\n        datastart = data = (uchar*)vec.val;\n        datalimit = dataend = datastart + rows * step[0];\n    }\n    else\n        Mat(n, 1, DataType<_Tp>::type, (void*)vec.val).copyTo(*this);\n}\n\n\ntemplate<typename _Tp, int m, int n> inline\nMat::Mat(const Matx<_Tp,m,n>& M, bool copyData)\n    : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(m), cols(n), data(0),\n      datastart(0), dataend(0), allocator(0), u(0), size(&rows)\n{\n    if( !copyData )\n    {\n        step[0] = cols * sizeof(_Tp);\n        step[1] = sizeof(_Tp);\n        datastart = data = (uchar*)M.val;\n        datalimit = dataend = datastart + rows * step[0];\n    }\n    else\n        Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this);\n}\n\ntemplate<typename _Tp> inline\nMat::Mat(const Point_<_Tp>& pt, bool copyData)\n    : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(2), cols(1), data(0),\n      datastart(0), dataend(0), allocator(0), u(0), size(&rows)\n{\n    if( !copyData )\n    {\n        step[0] = step[1] = sizeof(_Tp);\n        datastart = data = (uchar*)&pt.x;\n        datalimit = dataend = datastart + rows * step[0];\n    }\n    else\n    {\n        create(2, 1, DataType<_Tp>::type);\n        ((_Tp*)data)[0] = pt.x;\n        ((_Tp*)data)[1] = pt.y;\n    }\n}\n\ntemplate<typename _Tp> inline\nMat::Mat(const Point3_<_Tp>& pt, bool copyData)\n    : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows(3), cols(1), data(0),\n      datastart(0), dataend(0), allocator(0), u(0), size(&rows)\n{\n    if( !copyData )\n    {\n        step[0] = step[1] = sizeof(_Tp);\n        datastart = data = (uchar*)&pt.x;\n        datalimit = dataend = datastart + rows * step[0];\n    }\n    else\n    {\n        create(3, 1, DataType<_Tp>::type);\n        ((_Tp*)data)[0] = pt.x;\n        ((_Tp*)data)[1] = pt.y;\n        ((_Tp*)data)[2] = pt.z;\n    }\n}\n\ntemplate<typename _Tp> inline\nMat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer)\n    : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(0), rows(0), cols(0), data(0),\n      datastart(0), dataend(0), allocator(0), u(0), size(&rows)\n{\n    *this = commaInitializer.operator Mat_<_Tp>();\n}\n\ninline\nMat::~Mat()\n{\n    release();\n    if( step.p != step.buf )\n        fastFree(step.p);\n}\n\ninline\nMat& Mat::operator = (const Mat& m)\n{\n    if( this != &m )\n    {\n        if( m.u )\n            CV_XADD(&m.u->refcount, 
1);\n        release();\n        flags = m.flags;\n        if( dims <= 2 && m.dims <= 2 )\n        {\n            dims = m.dims;\n            rows = m.rows;\n            cols = m.cols;\n            step[0] = m.step[0];\n            step[1] = m.step[1];\n        }\n        else\n            copySize(m);\n        data = m.data;\n        datastart = m.datastart;\n        dataend = m.dataend;\n        datalimit = m.datalimit;\n        allocator = m.allocator;\n        u = m.u;\n    }\n    return *this;\n}\n\ninline\nMat Mat::row(int y) const\n{\n    return Mat(*this, Range(y, y + 1), Range::all());\n}\n\ninline\nMat Mat::col(int x) const\n{\n    return Mat(*this, Range::all(), Range(x, x + 1));\n}\n\ninline\nMat Mat::rowRange(int startrow, int endrow) const\n{\n    return Mat(*this, Range(startrow, endrow), Range::all());\n}\n\ninline\nMat Mat::rowRange(const Range& r) const\n{\n    return Mat(*this, r, Range::all());\n}\n\ninline\nMat Mat::colRange(int startcol, int endcol) const\n{\n    return Mat(*this, Range::all(), Range(startcol, endcol));\n}\n\ninline\nMat Mat::colRange(const Range& r) const\n{\n    return Mat(*this, Range::all(), r);\n}\n\ninline\nMat Mat::clone() const\n{\n    Mat m;\n    copyTo(m);\n    return m;\n}\n\ninline\nvoid Mat::assignTo( Mat& m, int _type ) const\n{\n    if( _type < 0 )\n        m = *this;\n    else\n        convertTo(m, _type);\n}\n\ninline\nvoid Mat::create(int _rows, int _cols, int _type)\n{\n    _type &= TYPE_MASK;\n    if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data )\n        return;\n    int sz[] = {_rows, _cols};\n    create(2, sz, _type);\n}\n\ninline\nvoid Mat::create(Size _sz, int _type)\n{\n    create(_sz.height, _sz.width, _type);\n}\n\ninline\nvoid Mat::addref()\n{\n    if( u )\n        CV_XADD(&u->refcount, 1);\n}\n\ninline void Mat::release()\n{\n    if( u && CV_XADD(&u->refcount, -1) == 1 )\n        deallocate();\n    u = NULL;\n    datastart = dataend = datalimit = data = 0;\n    for(int i = 0; i < dims; i++)\n        size.p[i] = 0;\n}\n\ninline\nMat Mat::operator()( Range _rowRange, Range _colRange ) const\n{\n    return Mat(*this, _rowRange, _colRange);\n}\n\ninline\nMat Mat::operator()( const Rect& roi ) const\n{\n    return Mat(*this, roi);\n}\n\ninline\nMat Mat::operator()(const Range* ranges) const\n{\n    return Mat(*this, ranges);\n}\n\ninline\nbool Mat::isContinuous() const\n{\n    return (flags & CONTINUOUS_FLAG) != 0;\n}\n\ninline\nbool Mat::isSubmatrix() const\n{\n    return (flags & SUBMATRIX_FLAG) != 0;\n}\n\ninline\nsize_t Mat::elemSize() const\n{\n    return dims > 0 ? 
step.p[dims - 1] : 0;\n}\n\ninline\nsize_t Mat::elemSize1() const\n{\n    return CV_ELEM_SIZE1(flags);\n}\n\ninline\nint Mat::type() const\n{\n    return CV_MAT_TYPE(flags);\n}\n\ninline\nint Mat::depth() const\n{\n    return CV_MAT_DEPTH(flags);\n}\n\ninline\nint Mat::channels() const\n{\n    return CV_MAT_CN(flags);\n}\n\ninline\nsize_t Mat::step1(int i) const\n{\n    return step.p[i] / elemSize1();\n}\n\ninline\nbool Mat::empty() const\n{\n    return data == 0 || total() == 0;\n}\n\ninline\nsize_t Mat::total() const\n{\n    if( dims <= 2 )\n        return (size_t)rows * cols;\n    size_t p = 1;\n    for( int i = 0; i < dims; i++ )\n        p *= size[i];\n    return p;\n}\n\ninline\nuchar* Mat::ptr(int y)\n{\n    CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );\n    return data + step.p[0] * y;\n}\n\ninline\nconst uchar* Mat::ptr(int y) const\n{\n    CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );\n    return data + step.p[0] * y;\n}\n\ntemplate<typename _Tp> inline\n_Tp* Mat::ptr(int y)\n{\n    CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );\n    return (_Tp*)(data + step.p[0] * y);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* Mat::ptr(int y) const\n{\n    CV_DbgAssert( y == 0 || (data && dims >= 1 && data && (unsigned)y < (unsigned)size.p[0]) );\n    return (const _Tp*)(data + step.p[0] * y);\n}\n\ninline\nuchar* Mat::ptr(int i0, int i1)\n{\n    CV_DbgAssert(dims >= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    return data + i0 * step.p[0] + i1 * step.p[1];\n}\n\ninline\nconst uchar* Mat::ptr(int i0, int i1) const\n{\n    CV_DbgAssert(dims >= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    return data + i0 * step.p[0] + i1 * step.p[1];\n}\n\ntemplate<typename _Tp> inline\n_Tp* Mat::ptr(int i0, int i1)\n{\n    CV_DbgAssert(dims >= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    return (_Tp*)(data + i0 * step.p[0] + i1 * step.p[1]);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* Mat::ptr(int i0, int i1) const\n{\n    CV_DbgAssert(dims >= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    return (const _Tp*)(data + i0 * step.p[0] + i1 * step.p[1]);\n}\n\ninline\nuchar* Mat::ptr(int i0, int i1, int i2)\n{\n    CV_DbgAssert(dims >= 3);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]);\n    return data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2];\n}\n\ninline\nconst uchar* Mat::ptr(int i0, int i1, int i2) const\n{\n    CV_DbgAssert(dims >= 3);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]);\n    return data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2];\n}\n\ntemplate<typename _Tp> inline\n_Tp* Mat::ptr(int i0, int i1, int i2)\n{\n    CV_DbgAssert(dims >= 3);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    
CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]);\n    return (_Tp*)(data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2]);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* Mat::ptr(int i0, int i1, int i2) const\n{\n    CV_DbgAssert(dims >= 3);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]);\n    return (const _Tp*)(data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2]);\n}\n\ninline\nuchar* Mat::ptr(const int* idx)\n{\n    int i, d = dims;\n    uchar* p = data;\n    CV_DbgAssert( d >= 1 && p );\n    for( i = 0; i < d; i++ )\n    {\n        CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] );\n        p += idx[i] * step.p[i];\n    }\n    return p;\n}\n\ninline\nconst uchar* Mat::ptr(const int* idx) const\n{\n    int i, d = dims;\n    uchar* p = data;\n    CV_DbgAssert( d >= 1 && p );\n    for( i = 0; i < d; i++ )\n    {\n        CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] );\n        p += idx[i] * step.p[i];\n    }\n    return p;\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat::at(int i0, int i1)\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()));\n    CV_DbgAssert(CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());\n    return ((_Tp*)(data + step.p[0] * i0))[i1];\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat::at(int i0, int i1) const\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()));\n    CV_DbgAssert(CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());\n    return ((const _Tp*)(data + step.p[0] * i0))[i1];\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat::at(Point pt)\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)(pt.x * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()));\n    CV_DbgAssert(CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());\n    return ((_Tp*)(data + step.p[0] * pt.y))[pt.x];\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat::at(Point pt) const\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)(pt.x * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()));\n    CV_DbgAssert(CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());\n    return ((const _Tp*)(data + step.p[0] * pt.y))[pt.x];\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat::at(int i0)\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)(size.p[0] * size.p[1]));\n    CV_DbgAssert(elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type));\n    if( isContinuous() || size.p[0] == 1 )\n        return ((_Tp*)data)[i0];\n    if( size.p[1] == 1 )\n        return *(_Tp*)(data + step.p[0] * i0);\n    int i = i0 / cols, j = i0 - i * cols;\n    return ((_Tp*)(data + step.p[0] * i))[j];\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat::at(int i0) const\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)(size.p[0] * size.p[1]));\n    CV_DbgAssert(elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type));\n    if( isContinuous() || size.p[0] == 1 )\n        return 
((const _Tp*)data)[i0];\n    if( size.p[1] == 1 )\n        return *(const _Tp*)(data + step.p[0] * i0);\n    int i = i0 / cols, j = i0 - i * cols;\n    return ((const _Tp*)(data + step.p[0] * i))[j];\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat::at(int i0, int i1, int i2)\n{\n    CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );\n    return *(_Tp*)ptr(i0, i1, i2);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat::at(int i0, int i1, int i2) const\n{\n    CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );\n    return *(const _Tp*)ptr(i0, i1, i2);\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat::at(const int* idx)\n{\n    CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );\n    return *(_Tp*)ptr(idx);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat::at(const int* idx) const\n{\n    CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );\n    return *(const _Tp*)ptr(idx);\n}\n\ntemplate<typename _Tp, int n> inline\n_Tp& Mat::at(const Vec<int, n>& idx)\n{\n    CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );\n    return *(_Tp*)ptr(idx.val);\n}\n\ntemplate<typename _Tp, int n> inline\nconst _Tp& Mat::at(const Vec<int, n>& idx) const\n{\n    CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );\n    return *(const _Tp*)ptr(idx.val);\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp> Mat::begin() const\n{\n    CV_DbgAssert( elemSize() == sizeof(_Tp) );\n    return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this);\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp> Mat::end() const\n{\n    CV_DbgAssert( elemSize() == sizeof(_Tp) );\n    MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this);\n    it += total();\n    return it;\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp> Mat::begin()\n{\n    CV_DbgAssert( elemSize() == sizeof(_Tp) );\n    return MatIterator_<_Tp>((Mat_<_Tp>*)this);\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp> Mat::end()\n{\n    CV_DbgAssert( elemSize() == sizeof(_Tp) );\n    MatIterator_<_Tp> it((Mat_<_Tp>*)this);\n    it += total();\n    return it;\n}\n\ntemplate<typename _Tp, typename Functor> inline\nvoid Mat::forEach(const Functor& operation) {\n    this->forEach_impl<_Tp>(operation);\n}\n\ntemplate<typename _Tp, typename Functor> inline\nvoid Mat::forEach(const Functor& operation) const {\n    // call as not const\n    (const_cast<Mat*>(this))->forEach<const _Tp>(operation);\n}\n\ntemplate<typename _Tp> inline\nMat::operator std::vector<_Tp>() const\n{\n    std::vector<_Tp> v;\n    copyTo(v);\n    return v;\n}\n\ntemplate<typename _Tp, int n> inline\nMat::operator Vec<_Tp, n>() const\n{\n    CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) &&\n               rows + cols - 1 == n && channels() == 1 );\n\n    if( isContinuous() && type() == DataType<_Tp>::type )\n        return Vec<_Tp, n>((_Tp*)data);\n    Vec<_Tp, n> v;\n    Mat tmp(rows, cols, DataType<_Tp>::type, v.val);\n    convertTo(tmp, tmp.type());\n    return v;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMat::operator Matx<_Tp, m, n>() const\n{\n    CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 );\n\n    if( isContinuous() && type() == DataType<_Tp>::type )\n        return Matx<_Tp, m, n>((_Tp*)data);\n    Matx<_Tp, m, n> mtx;\n    Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val);\n    convertTo(tmp, tmp.type());\n    return mtx;\n}\n\ntemplate<typename _Tp> inline\nvoid Mat::push_back(const _Tp& elem)\n{\n    if( !data )\n    {\n        *this = Mat(1, 1, 
DataType<_Tp>::type, (void*)&elem).clone();\n        return;\n    }\n    CV_Assert(DataType<_Tp>::type == type() && cols == 1\n              /* && dims == 2 (cols == 1 implies dims == 2) */);\n    const uchar* tmp = dataend + step[0];\n    if( !isSubmatrix() && isContinuous() && tmp <= datalimit )\n    {\n        *(_Tp*)(data + (size.p[0]++) * step.p[0]) = elem;\n        dataend = tmp;\n    }\n    else\n        push_back_(&elem);\n}\n\ntemplate<typename _Tp> inline\nvoid Mat::push_back(const Mat_<_Tp>& m)\n{\n    push_back((const Mat&)m);\n}\n\ntemplate<> inline\nvoid Mat::push_back(const MatExpr& expr)\n{\n    push_back(static_cast<Mat>(expr));\n}\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n\ninline\nMat::Mat(Mat&& m)\n    : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),\n      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator),\n      u(m.u), size(&rows)\n{\n    if (m.dims <= 2)  // move new step/size info\n    {\n        step[0] = m.step[0];\n        step[1] = m.step[1];\n    }\n    else\n    {\n        CV_DbgAssert(m.step.p != m.step.buf);\n        step.p = m.step.p;\n        size.p = m.size.p;\n        m.step.p = m.step.buf;\n        m.size.p = &m.rows;\n    }\n    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;\n    m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;\n    m.allocator = NULL;\n    m.u = NULL;\n}\n\ninline\nMat& Mat::operator = (Mat&& m)\n{\n    release();\n    flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; data = m.data;\n    datastart = m.datastart; dataend = m.dataend; datalimit = m.datalimit; allocator = m.allocator;\n    u = m.u;\n    if (step.p != step.buf) // release self step/size\n    {\n        fastFree(step.p);\n        step.p = step.buf;\n        size.p = &rows;\n    }\n    if (m.dims <= 2) // move new step/size info\n    {\n        step[0] = m.step[0];\n        step[1] = m.step[1];\n    }\n    else\n    {\n        CV_DbgAssert(m.step.p != m.step.buf);\n        step.p = m.step.p;\n        size.p = m.size.p;\n        m.step.p = m.step.buf;\n        m.size.p = &m.rows;\n    }\n    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;\n    m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL;\n    m.allocator = NULL;\n    m.u = NULL;\n    return *this;\n}\n\n#endif\n\n\n///////////////////////////// MatSize ////////////////////////////\n\ninline\nMatSize::MatSize(int* _p)\n    : p(_p) {}\n\ninline\nSize MatSize::operator()() const\n{\n    CV_DbgAssert(p[-1] <= 2);\n    return Size(p[1], p[0]);\n}\n\ninline\nconst int& MatSize::operator[](int i) const\n{\n    return p[i];\n}\n\ninline\nint& MatSize::operator[](int i)\n{\n    return p[i];\n}\n\ninline\nMatSize::operator const int*() const\n{\n    return p;\n}\n\ninline\nbool MatSize::operator == (const MatSize& sz) const\n{\n    int d = p[-1];\n    int dsz = sz.p[-1];\n    if( d != dsz )\n        return false;\n    if( d == 2 )\n        return p[0] == sz.p[0] && p[1] == sz.p[1];\n\n    for( int i = 0; i < d; i++ )\n        if( p[i] != sz.p[i] )\n            return false;\n    return true;\n}\n\ninline\nbool MatSize::operator != (const MatSize& sz) const\n{\n    return !(*this == sz);\n}\n\n\n\n///////////////////////////// MatStep ////////////////////////////\n\ninline\nMatStep::MatStep()\n{\n    p = buf; p[0] = p[1] = 0;\n}\n\ninline\nMatStep::MatStep(size_t s)\n{\n    p = buf; p[0] = s; p[1] = 0;\n}\n\ninline\nconst size_t& MatStep::operator[](int i) const\n{\n    return p[i];\n}\n\ninline\nsize_t& 
MatStep::operator[](int i)\n{\n    return p[i];\n}\n\ninline MatStep::operator size_t() const\n{\n    CV_DbgAssert( p == buf );\n    return buf[0];\n}\n\ninline MatStep& MatStep::operator = (size_t s)\n{\n    CV_DbgAssert( p == buf );\n    buf[0] = s;\n    return *this;\n}\n\n\n\n////////////////////////////// Mat_<_Tp> ////////////////////////////\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_()\n    : Mat()\n{\n    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(int _rows, int _cols)\n    : Mat(_rows, _cols, DataType<_Tp>::type)\n{\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value)\n    : Mat(_rows, _cols, DataType<_Tp>::type)\n{\n    *this = value;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(Size _sz)\n    : Mat(_sz.height, _sz.width, DataType<_Tp>::type)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(Size _sz, const _Tp& value)\n    : Mat(_sz.height, _sz.width, DataType<_Tp>::type)\n{\n    *this = value;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(int _dims, const int* _sz)\n    : Mat(_dims, _sz, DataType<_Tp>::type)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s)\n    : Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s))\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges)\n    : Mat(m, ranges)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const Mat& m)\n    : Mat()\n{\n    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type;\n    *this = m;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const Mat_& m)\n    : Mat(m)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps)\n    : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const Mat_& m, const Range& _rowRange, const Range& _colRange)\n    : Mat(m, _rowRange, _colRange)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const Mat_& m, const Rect& roi)\n    : Mat(m, roi)\n{}\n\ntemplate<typename _Tp> template<int n> inline\nMat_<_Tp>::Mat_(const Vec<typename DataType<_Tp>::channel_type, n>& vec, bool copyData)\n    : Mat(n / DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec)\n{\n    CV_Assert(n%DataType<_Tp>::channels == 0);\n    if( copyData )\n        *this = clone();\n}\n\ntemplate<typename _Tp> template<int m, int n> inline\nMat_<_Tp>::Mat_(const Matx<typename DataType<_Tp>::channel_type, m, n>& M, bool copyData)\n    : Mat(m, n / DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M)\n{\n    CV_Assert(n % DataType<_Tp>::channels == 0);\n    if( copyData )\n        *this = clone();\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const Point_<typename DataType<_Tp>::channel_type>& pt, bool copyData)\n    : Mat(2 / DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt)\n{\n    CV_Assert(2 % DataType<_Tp>::channels == 0);\n    if( copyData )\n        *this = clone();\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const Point3_<typename DataType<_Tp>::channel_type>& pt, bool copyData)\n    : Mat(3 / DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt)\n{\n    CV_Assert(3 % DataType<_Tp>::channels == 0);\n    if( copyData )\n        *this = clone();\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer)\n    : Mat(commaInitializer)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const 
std::vector<_Tp>& vec, bool copyData)\n    : Mat(vec, copyData)\n{}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m)\n{\n    if( DataType<_Tp>::type == m.type() )\n    {\n        Mat::operator = (m);\n        return *this;\n    }\n    if( DataType<_Tp>::depth == m.depth() )\n    {\n        return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0));\n    }\n    CV_DbgAssert(DataType<_Tp>::channels == m.channels());\n    m.convertTo(*this, type());\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m)\n{\n    Mat::operator=(m);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s)\n{\n    typedef typename DataType<_Tp>::vec_type VT;\n    Mat::operator=(Scalar((const VT&)s));\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nvoid Mat_<_Tp>::create(int _rows, int _cols)\n{\n    Mat::create(_rows, _cols, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nvoid Mat_<_Tp>::create(Size _sz)\n{\n    Mat::create(_sz, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nvoid Mat_<_Tp>::create(int _dims, const int* _sz)\n{\n    Mat::create(_dims, _sz, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const\n{\n    return Mat_<_Tp>(Mat::cross(m));\n}\n\ntemplate<typename _Tp> template<typename T2> inline\nMat_<_Tp>::operator Mat_<T2>() const\n{\n    return Mat_<T2>(*this);\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::row(int y) const\n{\n    return Mat_(*this, Range(y, y+1), Range::all());\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::col(int x) const\n{\n    return Mat_(*this, Range::all(), Range(x, x+1));\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::diag(int d) const\n{\n    return Mat_(Mat::diag(d));\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::clone() const\n{\n    return Mat_(Mat::clone());\n}\n\ntemplate<typename _Tp> inline\nsize_t Mat_<_Tp>::elemSize() const\n{\n    CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) );\n    return sizeof(_Tp);\n}\n\ntemplate<typename _Tp> inline\nsize_t Mat_<_Tp>::elemSize1() const\n{\n    CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp) / DataType<_Tp>::channels );\n    return sizeof(_Tp) / DataType<_Tp>::channels;\n}\n\ntemplate<typename _Tp> inline\nint Mat_<_Tp>::type() const\n{\n    CV_DbgAssert( Mat::type() == DataType<_Tp>::type );\n    return DataType<_Tp>::type;\n}\n\ntemplate<typename _Tp> inline\nint Mat_<_Tp>::depth() const\n{\n    CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth );\n    return DataType<_Tp>::depth;\n}\n\ntemplate<typename _Tp> inline\nint Mat_<_Tp>::channels() const\n{\n    CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels );\n    return DataType<_Tp>::channels;\n}\n\ntemplate<typename _Tp> inline\nsize_t Mat_<_Tp>::stepT(int i) const\n{\n    return step.p[i] / elemSize();\n}\n\ntemplate<typename _Tp> inline\nsize_t Mat_<_Tp>::step1(int i) const\n{\n    return step.p[i] / elemSize1();\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright )\n{\n    return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright));\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::operator()( const Range& _rowRange, const Range& _colRange ) const\n{\n    return Mat_<_Tp>(*this, _rowRange, _colRange);\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const\n{\n    return Mat_<_Tp>(*this, 
roi);\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const\n{\n    return Mat_<_Tp>(*this, ranges);\n}\n\ntemplate<typename _Tp> inline\n_Tp* Mat_<_Tp>::operator [](int y)\n{\n    CV_DbgAssert( 0 <= y && y < rows );\n    return (_Tp*)(data + y*step.p[0]);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* Mat_<_Tp>::operator [](int y) const\n{\n    CV_DbgAssert( 0 <= y && y < rows );\n    return (const _Tp*)(data + y*step.p[0]);\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat_<_Tp>::operator ()(int i0, int i1)\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    CV_DbgAssert(type() == DataType<_Tp>::type);\n    return ((_Tp*)(data + step.p[0] * i0))[i1];\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]);\n    CV_DbgAssert(type() == DataType<_Tp>::type);\n    return ((const _Tp*)(data + step.p[0] * i0))[i1];\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat_<_Tp>::operator ()(Point pt)\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)pt.x < (unsigned)size.p[1]);\n    CV_DbgAssert(type() == DataType<_Tp>::type);\n    return ((_Tp*)(data + step.p[0] * pt.y))[pt.x];\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat_<_Tp>::operator ()(Point pt) const\n{\n    CV_DbgAssert(dims <= 2);\n    CV_DbgAssert(data);\n    CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]);\n    CV_DbgAssert((unsigned)pt.x < (unsigned)size.p[1]);\n    CV_DbgAssert(type() == DataType<_Tp>::type);\n    return ((const _Tp*)(data + step.p[0] * pt.y))[pt.x];\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat_<_Tp>::operator ()(const int* idx)\n{\n    return Mat::at<_Tp>(idx);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat_<_Tp>::operator ()(const int* idx) const\n{\n    return Mat::at<_Tp>(idx);\n}\n\ntemplate<typename _Tp> template<int n> inline\n_Tp& Mat_<_Tp>::operator ()(const Vec<int, n>& idx)\n{\n    return Mat::at<_Tp>(idx);\n}\n\ntemplate<typename _Tp> template<int n> inline\nconst _Tp& Mat_<_Tp>::operator ()(const Vec<int, n>& idx) const\n{\n    return Mat::at<_Tp>(idx);\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat_<_Tp>::operator ()(int i0)\n{\n    return this->at<_Tp>(i0);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat_<_Tp>::operator ()(int i0) const\n{\n    return this->at<_Tp>(i0);\n}\n\ntemplate<typename _Tp> inline\n_Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2)\n{\n    return this->at<_Tp>(i0, i1, i2);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const\n{\n    return this->at<_Tp>(i0, i1, i2);\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::operator std::vector<_Tp>() const\n{\n    std::vector<_Tp> v;\n    copyTo(v);\n    return v;\n}\n\ntemplate<typename _Tp> template<int n> inline\nMat_<_Tp>::operator Vec<typename DataType<_Tp>::channel_type, n>() const\n{\n    CV_Assert(n % DataType<_Tp>::channels == 0);\n\n#if defined _MSC_VER\n    const Mat* pMat = (const Mat*)this; // workaround for MSVS <= 2012 compiler bugs (but GCC 4.6 dislikes this workaround)\n    return pMat->operator Vec<typename DataType<_Tp>::channel_type, n>();\n#else\n    return this->Mat::operator Vec<typename 
DataType<_Tp>::channel_type, n>();\n#endif\n}\n\ntemplate<typename _Tp> template<int m, int n> inline\nMat_<_Tp>::operator Matx<typename DataType<_Tp>::channel_type, m, n>() const\n{\n    CV_Assert(n % DataType<_Tp>::channels == 0);\n\n#if defined _MSC_VER\n    const Mat* pMat = (const Mat*)this; // workaround for MSVS <= 2012 compiler bugs (but GCC 4.6 dislikes this workaround)\n    Matx<typename DataType<_Tp>::channel_type, m, n> res = pMat->operator Matx<typename DataType<_Tp>::channel_type, m, n>();\n    return res;\n#else\n    Matx<typename DataType<_Tp>::channel_type, m, n> res = this->Mat::operator Matx<typename DataType<_Tp>::channel_type, m, n>();\n    return res;\n#endif\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp> Mat_<_Tp>::begin() const\n{\n    return Mat::begin<_Tp>();\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp> Mat_<_Tp>::end() const\n{\n    return Mat::end<_Tp>();\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp> Mat_<_Tp>::begin()\n{\n    return Mat::begin<_Tp>();\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp> Mat_<_Tp>::end()\n{\n    return Mat::end<_Tp>();\n}\n\ntemplate<typename _Tp> template<typename Functor> inline\nvoid Mat_<_Tp>::forEach(const Functor& operation) {\n    Mat::forEach<_Tp, Functor>(operation);\n}\n\ntemplate<typename _Tp> template<typename Functor> inline\nvoid Mat_<_Tp>::forEach(const Functor& operation) const {\n    Mat::forEach<_Tp, Functor>(operation);\n}\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(Mat_&& m)\n    : Mat(m)\n{\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>& Mat_<_Tp>::operator = (Mat_&& m)\n{\n    Mat::operator = (m);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(Mat&& m)\n    : Mat()\n{\n    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type;\n    *this = m;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>& Mat_<_Tp>::operator = (Mat&& m)\n{\n    if( DataType<_Tp>::type == m.type() )\n    {\n        Mat::operator = ((Mat&&)m);\n        return *this;\n    }\n    if( DataType<_Tp>::depth == m.depth() )\n    {\n        Mat::operator = ((Mat&&)m.reshape(DataType<_Tp>::channels, m.dims, 0));\n        return *this;\n    }\n    CV_DbgAssert(DataType<_Tp>::channels == m.channels());\n    m.convertTo(*this, type());\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(MatExpr&& e)\n    : Mat()\n{\n    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type;\n    *this = Mat(e);\n}\n\n#endif\n\n///////////////////////////// SparseMat /////////////////////////////\n\ninline\nSparseMat::SparseMat()\n    : flags(MAGIC_VAL), hdr(0)\n{}\n\ninline\nSparseMat::SparseMat(int _dims, const int* _sizes, int _type)\n    : flags(MAGIC_VAL), hdr(0)\n{\n    create(_dims, _sizes, _type);\n}\n\ninline\nSparseMat::SparseMat(const SparseMat& m)\n    : flags(m.flags), hdr(m.hdr)\n{\n    addref();\n}\n\ninline\nSparseMat::~SparseMat()\n{\n    release();\n}\n\ninline\nSparseMat& SparseMat::operator = (const SparseMat& m)\n{\n    if( this != &m )\n    {\n        if( m.hdr )\n            CV_XADD(&m.hdr->refcount, 1);\n        release();\n        flags = m.flags;\n        hdr = m.hdr;\n    }\n    return *this;\n}\n\ninline\nSparseMat& SparseMat::operator = (const Mat& m)\n{\n    return (*this = SparseMat(m));\n}\n\ninline\nSparseMat SparseMat::clone() const\n{\n    SparseMat temp;\n    this->copyTo(temp);\n    return temp;\n}\n\ninline\nvoid SparseMat::assignTo( SparseMat& m, int _type ) const\n{\n    if( _type < 0 )\n        m = 
*this;\n    else\n        convertTo(m, _type);\n}\n\ninline\nvoid SparseMat::addref()\n{\n    if( hdr )\n        CV_XADD(&hdr->refcount, 1);\n}\n\ninline\nvoid SparseMat::release()\n{\n    if( hdr && CV_XADD(&hdr->refcount, -1) == 1 )\n        delete hdr;\n    hdr = 0;\n}\n\ninline\nsize_t SparseMat::elemSize() const\n{\n    return CV_ELEM_SIZE(flags);\n}\n\ninline\nsize_t SparseMat::elemSize1() const\n{\n    return CV_ELEM_SIZE1(flags);\n}\n\ninline\nint SparseMat::type() const\n{\n    return CV_MAT_TYPE(flags);\n}\n\ninline\nint SparseMat::depth() const\n{\n    return CV_MAT_DEPTH(flags);\n}\n\ninline\nint SparseMat::channels() const\n{\n    return CV_MAT_CN(flags);\n}\n\ninline\nconst int* SparseMat::size() const\n{\n    return hdr ? hdr->size : 0;\n}\n\ninline\nint SparseMat::size(int i) const\n{\n    if( hdr )\n    {\n        CV_DbgAssert((unsigned)i < (unsigned)hdr->dims);\n        return hdr->size[i];\n    }\n    return 0;\n}\n\ninline\nint SparseMat::dims() const\n{\n    return hdr ? hdr->dims : 0;\n}\n\ninline\nsize_t SparseMat::nzcount() const\n{\n    return hdr ? hdr->nodeCount : 0;\n}\n\ninline\nsize_t SparseMat::hash(int i0) const\n{\n    return (size_t)i0;\n}\n\ninline\nsize_t SparseMat::hash(int i0, int i1) const\n{\n    return (size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1;\n}\n\ninline\nsize_t SparseMat::hash(int i0, int i1, int i2) const\n{\n    return ((size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1) * HASH_SCALE + (unsigned)i2;\n}\n\ninline\nsize_t SparseMat::hash(const int* idx) const\n{\n    size_t h = (unsigned)idx[0];\n    if( !hdr )\n        return 0;\n    int d = hdr->dims;\n    for(int i = 1; i < d; i++ )\n        h = h * HASH_SCALE + (unsigned)idx[i];\n    return h;\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat::ref(int i0, size_t* hashval)\n{\n    return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat::ref(int i0, int i1, size_t* hashval)\n{\n    return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval)\n{\n    return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat::ref(const int* idx, size_t* hashval)\n{\n    return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat::value(int i0, size_t* hashval) const\n{\n    const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval);\n    return p ? *p : _Tp();\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat::value(int i0, int i1, size_t* hashval) const\n{\n    const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval);\n    return p ? *p : _Tp();\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const\n{\n    const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval);\n    return p ? *p : _Tp();\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat::value(const int* idx, size_t* hashval) const\n{\n    const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval);\n    return p ? 
*p : _Tp();\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* SparseMat::find(int i0, size_t* hashval) const\n{\n    return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const\n{\n    return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const\n{\n    return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp* SparseMat::find(const int* idx, size_t* hashval) const\n{\n    return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat::value(Node* n)\n{\n    return *(_Tp*)((uchar*)n + hdr->valueOffset);\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& SparseMat::value(const Node* n) const\n{\n    return *(const _Tp*)((const uchar*)n + hdr->valueOffset);\n}\n\ninline\nSparseMat::Node* SparseMat::node(size_t nidx)\n{\n    return (Node*)(void*)&hdr->pool[nidx];\n}\n\ninline\nconst SparseMat::Node* SparseMat::node(size_t nidx) const\n{\n    return (const Node*)(const void*)&hdr->pool[nidx];\n}\n\ninline\nSparseMatIterator SparseMat::begin()\n{\n    return SparseMatIterator(this);\n}\n\ninline\nSparseMatConstIterator SparseMat::begin() const\n{\n    return SparseMatConstIterator(this);\n}\n\ninline\nSparseMatIterator SparseMat::end()\n{\n    SparseMatIterator it(this);\n    it.seekEnd();\n    return it;\n}\n\ninline\nSparseMatConstIterator SparseMat::end() const\n{\n    SparseMatConstIterator it(this);\n    it.seekEnd();\n    return it;\n}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp> SparseMat::begin()\n{\n    return SparseMatIterator_<_Tp>(this);\n}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp> SparseMat::begin() const\n{\n    return SparseMatConstIterator_<_Tp>(this);\n}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp> SparseMat::end()\n{\n    SparseMatIterator_<_Tp> it(this);\n    it.seekEnd();\n    return it;\n}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp> SparseMat::end() const\n{\n    SparseMatConstIterator_<_Tp> it(this);\n    it.seekEnd();\n    return it;\n}\n\n\n\n///////////////////////////// SparseMat_ ////////////////////////////\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>::SparseMat_()\n{\n    flags = MAGIC_VAL | DataType<_Tp>::type;\n}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes)\n    : SparseMat(_dims, _sizes, DataType<_Tp>::type)\n{}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>::SparseMat_(const SparseMat& m)\n{\n    if( m.type() == DataType<_Tp>::type )\n        *this = (const SparseMat_<_Tp>&)m;\n    else\n        m.convertTo(*this, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m)\n{\n    this->flags = m.flags;\n    this->hdr = m.hdr;\n    if( this->hdr )\n        CV_XADD(&this->hdr->refcount, 1);\n}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>::SparseMat_(const Mat& m)\n{\n    SparseMat sm(m);\n    *this = sm;\n}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>& SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m)\n{\n    if( this != &m )\n    {\n        if( m.hdr ) CV_XADD(&m.hdr->refcount, 1);\n        release();\n        flags = m.flags;\n        hdr = m.hdr;\n    }\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>& 
SparseMat_<_Tp>::operator = (const SparseMat& m)\n{\n    if( m.type() == DataType<_Tp>::type )\n        return (*this = (const SparseMat_<_Tp>&)m);\n    m.convertTo(*this, DataType<_Tp>::type);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp>& SparseMat_<_Tp>::operator = (const Mat& m)\n{\n    return (*this = SparseMat(m));\n}\n\ntemplate<typename _Tp> inline\nSparseMat_<_Tp> SparseMat_<_Tp>::clone() const\n{\n    SparseMat_<_Tp> m;\n    this->copyTo(m);\n    return m;\n}\n\ntemplate<typename _Tp> inline\nvoid SparseMat_<_Tp>::create(int _dims, const int* _sizes)\n{\n    SparseMat::create(_dims, _sizes, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nint SparseMat_<_Tp>::type() const\n{\n    return DataType<_Tp>::type;\n}\n\ntemplate<typename _Tp> inline\nint SparseMat_<_Tp>::depth() const\n{\n    return DataType<_Tp>::depth;\n}\n\ntemplate<typename _Tp> inline\nint SparseMat_<_Tp>::channels() const\n{\n    return DataType<_Tp>::channels;\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat_<_Tp>::ref(int i0, size_t* hashval)\n{\n    return SparseMat::ref<_Tp>(i0, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const\n{\n    return SparseMat::value<_Tp>(i0, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval)\n{\n    return SparseMat::ref<_Tp>(i0, i1, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const\n{\n    return SparseMat::value<_Tp>(i0, i1, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval)\n{\n    return SparseMat::ref<_Tp>(i0, i1, i2, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const\n{\n    return SparseMat::value<_Tp>(i0, i1, i2, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMat_<_Tp>::ref(const int* idx, size_t* hashval)\n{\n    return SparseMat::ref<_Tp>(idx, hashval);\n}\n\ntemplate<typename _Tp> inline\n_Tp SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const\n{\n    return SparseMat::value<_Tp>(idx, hashval);\n}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp> SparseMat_<_Tp>::begin()\n{\n    return SparseMatIterator_<_Tp>(this);\n}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const\n{\n    return SparseMatConstIterator_<_Tp>(this);\n}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp> SparseMat_<_Tp>::end()\n{\n    SparseMatIterator_<_Tp> it(this);\n    it.seekEnd();\n    return it;\n}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const\n{\n    SparseMatConstIterator_<_Tp> it(this);\n    it.seekEnd();\n    return it;\n}\n\n\n\n////////////////////////// MatConstIterator /////////////////////////\n\ninline\nMatConstIterator::MatConstIterator()\n    : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0)\n{}\n\ninline\nMatConstIterator::MatConstIterator(const Mat* _m)\n    : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0)\n{\n    if( m && m->isContinuous() )\n    {\n        sliceStart = m->ptr();\n        sliceEnd = sliceStart + m->total()*elemSize;\n    }\n    seek((const int*)0);\n}\n\ninline\nMatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col)\n    : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0)\n{\n    CV_Assert(m && m->dims <= 2);\n    if( 
m->isContinuous() )\n    {\n        sliceStart = m->ptr();\n        sliceEnd = sliceStart + m->total()*elemSize;\n    }\n    int idx[] = {_row, _col};\n    seek(idx);\n}\n\ninline\nMatConstIterator::MatConstIterator(const Mat* _m, Point _pt)\n    : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0)\n{\n    CV_Assert(m && m->dims <= 2);\n    if( m->isContinuous() )\n    {\n        sliceStart = m->ptr();\n        sliceEnd = sliceStart + m->total()*elemSize;\n    }\n    int idx[] = {_pt.y, _pt.x};\n    seek(idx);\n}\n\ninline\nMatConstIterator::MatConstIterator(const MatConstIterator& it)\n    : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd)\n{}\n\ninline\nMatConstIterator& MatConstIterator::operator = (const MatConstIterator& it )\n{\n    m = it.m; elemSize = it.elemSize; ptr = it.ptr;\n    sliceStart = it.sliceStart; sliceEnd = it.sliceEnd;\n    return *this;\n}\n\ninline\nconst uchar* MatConstIterator::operator *() const\n{\n    return ptr;\n}\n\ninline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs)\n{\n    if( !m || ofs == 0 )\n        return *this;\n    ptrdiff_t ofsb = ofs*elemSize;\n    ptr += ofsb;\n    if( ptr < sliceStart || sliceEnd <= ptr )\n    {\n        ptr -= ofsb;\n        seek(ofs, true);\n    }\n    return *this;\n}\n\ninline\nMatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs)\n{\n    return (*this += -ofs);\n}\n\ninline\nMatConstIterator& MatConstIterator::operator --()\n{\n    if( m && (ptr -= elemSize) < sliceStart )\n    {\n        ptr += elemSize;\n        seek(-1, true);\n    }\n    return *this;\n}\n\ninline\nMatConstIterator MatConstIterator::operator --(int)\n{\n    MatConstIterator b = *this;\n    *this += -1;\n    return b;\n}\n\ninline\nMatConstIterator& MatConstIterator::operator ++()\n{\n    if( m && (ptr += elemSize) >= sliceEnd )\n    {\n        ptr -= elemSize;\n        seek(1, true);\n    }\n    return *this;\n}\n\ninline MatConstIterator MatConstIterator::operator ++(int)\n{\n    MatConstIterator b = *this;\n    *this += 1;\n    return b;\n}\n\n\nstatic inline\nbool operator == (const MatConstIterator& a, const MatConstIterator& b)\n{\n    return a.m == b.m && a.ptr == b.ptr;\n}\n\nstatic inline\nbool operator != (const MatConstIterator& a, const MatConstIterator& b)\n{\n    return !(a == b);\n}\n\nstatic inline\nbool operator < (const MatConstIterator& a, const MatConstIterator& b)\n{\n    return a.ptr < b.ptr;\n}\n\nstatic inline\nbool operator > (const MatConstIterator& a, const MatConstIterator& b)\n{\n    return a.ptr > b.ptr;\n}\n\nstatic inline\nbool operator <= (const MatConstIterator& a, const MatConstIterator& b)\n{\n    return a.ptr <= b.ptr;\n}\n\nstatic inline\nbool operator >= (const MatConstIterator& a, const MatConstIterator& b)\n{\n    return a.ptr >= b.ptr;\n}\n\nstatic inline\nptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a)\n{\n    if( a.m != b.m )\n        return ((size_t)(-1) >> 1);\n    if( a.sliceEnd == b.sliceEnd )\n        return (b.ptr - a.ptr)/b.elemSize;\n\n    return b.lpos() - a.lpos();\n}\n\nstatic inline\nMatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs)\n{\n    MatConstIterator b = a;\n    return b += ofs;\n}\n\nstatic inline\nMatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a)\n{\n    MatConstIterator b = a;\n    return b += ofs;\n}\n\nstatic inline\nMatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs)\n{\n    MatConstIterator b = a;\n    return 
b += -ofs;\n}\n\n\ninline\nconst uchar* MatConstIterator::operator [](ptrdiff_t i) const\n{\n    return *(*this + i);\n}\n\n\n\n///////////////////////// MatConstIterator_ /////////////////////////\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>::MatConstIterator_()\n{}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m)\n    : MatConstIterator(_m)\n{}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col)\n    : MatConstIterator(_m, _row, _col)\n{}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m, Point _pt)\n    : MatConstIterator(_m, _pt)\n{}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>::MatConstIterator_(const MatConstIterator_& it)\n    : MatConstIterator(it)\n{}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it )\n{\n    MatConstIterator::operator = (it);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\n_Tp MatConstIterator_<_Tp>::operator *() const\n{\n    return *(_Tp*)(this->ptr);\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs)\n{\n    MatConstIterator::operator += (ofs);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs)\n{\n    return (*this += -ofs);\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --()\n{\n    MatConstIterator::operator --();\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int)\n{\n    MatConstIterator_ b = *this;\n    MatConstIterator::operator --();\n    return b;\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++()\n{\n    MatConstIterator::operator ++();\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int)\n{\n    MatConstIterator_ b = *this;\n    MatConstIterator::operator ++();\n    return b;\n}\n\n\ntemplate<typename _Tp> inline\nPoint MatConstIterator_<_Tp>::pos() const\n{\n    if( !m )\n        return Point();\n    CV_DbgAssert( m->dims <= 2 );\n    if( m->isContinuous() )\n    {\n        ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data;\n        int y = (int)(ofs / m->cols);\n        int x = (int)(ofs - (ptrdiff_t)y * m->cols);\n        return Point(x, y);\n    }\n    else\n    {\n        ptrdiff_t ofs = (uchar*)ptr - m->data;\n        int y = (int)(ofs / m->step);\n        int x = (int)((ofs - y * m->step)/sizeof(_Tp));\n        return Point(x, y);\n    }\n}\n\n\ntemplate<typename _Tp> static inline\nbool operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b)\n{\n    return a.m == b.m && a.ptr == b.ptr;\n}\n\ntemplate<typename _Tp> static inline\nbool operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b)\n{\n    return a.m != b.m || a.ptr != b.ptr;\n}\n\ntemplate<typename _Tp> static inline\nMatConstIterator_<_Tp> operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)\n{\n    MatConstIterator t = (const MatConstIterator&)a + ofs;\n    return (MatConstIterator_<_Tp>&)t;\n}\n\ntemplate<typename _Tp> static inline\nMatConstIterator_<_Tp> operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a)\n{\n    MatConstIterator t = (const MatConstIterator&)a + ofs;\n    return 
(MatConstIterator_<_Tp>&)t;\n}\n\ntemplate<typename _Tp> static inline\nMatConstIterator_<_Tp> operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)\n{\n    MatConstIterator t = (const MatConstIterator&)a - ofs;\n    return (MatConstIterator_<_Tp>&)t;\n}\n\ntemplate<typename _Tp> inline\n_Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const\n{\n    return *(_Tp*)MatConstIterator::operator [](i);\n}\n\n\n\n//////////////////////////// MatIterator_ ///////////////////////////\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>::MatIterator_()\n    : MatConstIterator_<_Tp>()\n{}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m)\n    : MatConstIterator_<_Tp>(_m)\n{}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col)\n    : MatConstIterator_<_Tp>(_m, _row, _col)\n{}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, Point _pt)\n    : MatConstIterator_<_Tp>(_m, _pt)\n{}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, const int* _idx)\n    : MatConstIterator_<_Tp>(_m, _idx)\n{}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>::MatIterator_(const MatIterator_& it)\n    : MatConstIterator_<_Tp>(it)\n{}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it )\n{\n    MatConstIterator::operator = (it);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\n_Tp& MatIterator_<_Tp>::operator *() const\n{\n    return *(_Tp*)(this->ptr);\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs)\n{\n    MatConstIterator::operator += (ofs);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t ofs)\n{\n    MatConstIterator::operator += (-ofs);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>& MatIterator_<_Tp>::operator --()\n{\n    MatConstIterator::operator --();\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp> MatIterator_<_Tp>::operator --(int)\n{\n    MatIterator_ b = *this;\n    MatConstIterator::operator --();\n    return b;\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp>& MatIterator_<_Tp>::operator ++()\n{\n    MatConstIterator::operator ++();\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int)\n{\n    MatIterator_ b = *this;\n    MatConstIterator::operator ++();\n    return b;\n}\n\ntemplate<typename _Tp> inline\n_Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const\n{\n    return *(*this + i);\n}\n\n\ntemplate<typename _Tp> static inline\nbool operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b)\n{\n    return a.m == b.m && a.ptr == b.ptr;\n}\n\ntemplate<typename _Tp> static inline\nbool operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b)\n{\n    return a.m != b.m || a.ptr != b.ptr;\n}\n\ntemplate<typename _Tp> static inline\nMatIterator_<_Tp> operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs)\n{\n    MatConstIterator t = (const MatConstIterator&)a + ofs;\n    return (MatIterator_<_Tp>&)t;\n}\n\ntemplate<typename _Tp> static inline\nMatIterator_<_Tp> operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a)\n{\n    MatConstIterator t = (const MatConstIterator&)a + ofs;\n    return (MatIterator_<_Tp>&)t;\n}\n\ntemplate<typename _Tp> static inline\nMatIterator_<_Tp> operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs)\n{\n    
MatConstIterator t = (const MatConstIterator&)a - ofs;\n    return (MatIterator_<_Tp>&)t;\n}\n\n\n\n/////////////////////// SparseMatConstIterator //////////////////////\n\ninline\nSparseMatConstIterator::SparseMatConstIterator()\n    : m(0), hashidx(0), ptr(0)\n{}\n\ninline\nSparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it)\n    : m(it.m), hashidx(it.hashidx), ptr(it.ptr)\n{}\n\ninline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it)\n{\n    if( this != &it )\n    {\n        m = it.m;\n        hashidx = it.hashidx;\n        ptr = it.ptr;\n    }\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& SparseMatConstIterator::value() const\n{\n    return *(const _Tp*)ptr;\n}\n\ninline\nconst SparseMat::Node* SparseMatConstIterator::node() const\n{\n    return (ptr && m && m->hdr) ? (const SparseMat::Node*)(const void*)(ptr - m->hdr->valueOffset) : 0;\n}\n\ninline\nSparseMatConstIterator SparseMatConstIterator::operator ++(int)\n{\n    SparseMatConstIterator it = *this;\n    ++*this;\n    return it;\n}\n\ninline\nvoid SparseMatConstIterator::seekEnd()\n{\n    if( m && m->hdr )\n    {\n        hashidx = m->hdr->hashtab.size();\n        ptr = 0;\n    }\n}\n\n\nstatic inline\nbool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2)\n{\n    return it1.m == it2.m && it1.ptr == it2.ptr;\n}\n\nstatic inline\nbool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2)\n{\n    return !(it1 == it2);\n}\n\n\n\n///////////////////////// SparseMatIterator /////////////////////////\n\ninline\nSparseMatIterator::SparseMatIterator()\n{}\n\ninline\nSparseMatIterator::SparseMatIterator(SparseMat* _m)\n    : SparseMatConstIterator(_m)\n{}\n\ninline\nSparseMatIterator::SparseMatIterator(const SparseMatIterator& it)\n    : SparseMatConstIterator(it)\n{}\n\ninline\nSparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it)\n{\n    (SparseMatConstIterator&)*this = it;\n    return *this;\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMatIterator::value() const\n{\n    return *(_Tp*)ptr;\n}\n\ninline\nSparseMat::Node* SparseMatIterator::node() const\n{\n    return (SparseMat::Node*)SparseMatConstIterator::node();\n}\n\ninline\nSparseMatIterator& SparseMatIterator::operator ++()\n{\n    SparseMatConstIterator::operator ++();\n    return *this;\n}\n\ninline\nSparseMatIterator SparseMatIterator::operator ++(int)\n{\n    SparseMatIterator it = *this;\n    ++*this;\n    return it;\n}\n\n\n\n////////////////////// SparseMatConstIterator_ //////////////////////\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp>::SparseMatConstIterator_()\n{}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m)\n    : SparseMatConstIterator(_m)\n{}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat* _m)\n    : SparseMatConstIterator(_m)\n{\n    CV_Assert( _m->type() == DataType<_Tp>::type );\n}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it)\n    : SparseMatConstIterator(it)\n{}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp>& SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it)\n{\n    return reinterpret_cast<SparseMatConstIterator_<_Tp>&>\n         (*reinterpret_cast<SparseMatConstIterator*>(this) =\n           
reinterpret_cast<const SparseMatConstIterator&>(it));\n}\n\ntemplate<typename _Tp> inline\nconst _Tp& SparseMatConstIterator_<_Tp>::operator *() const\n{\n    return *(const _Tp*)this->ptr;\n}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp>& SparseMatConstIterator_<_Tp>::operator ++()\n{\n    SparseMatConstIterator::operator ++();\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nSparseMatConstIterator_<_Tp> SparseMatConstIterator_<_Tp>::operator ++(int)\n{\n    SparseMatConstIterator_<_Tp> it = *this;\n    SparseMatConstIterator::operator ++();\n    return it;\n}\n\n\n\n///////////////////////// SparseMatIterator_ ////////////////////////\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp>::SparseMatIterator_()\n{}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m)\n    : SparseMatConstIterator_<_Tp>(_m)\n{}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat* _m)\n    : SparseMatConstIterator_<_Tp>(_m)\n{}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it)\n    : SparseMatConstIterator_<_Tp>(it)\n{}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp>& SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it)\n{\n    return reinterpret_cast<SparseMatIterator_<_Tp>&>\n         (*reinterpret_cast<SparseMatConstIterator*>(this) =\n           reinterpret_cast<const SparseMatConstIterator&>(it));\n}\n\ntemplate<typename _Tp> inline\n_Tp& SparseMatIterator_<_Tp>::operator *() const\n{\n    return *(_Tp*)this->ptr;\n}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp>& SparseMatIterator_<_Tp>::operator ++()\n{\n    SparseMatConstIterator::operator ++();\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nSparseMatIterator_<_Tp> SparseMatIterator_<_Tp>::operator ++(int)\n{\n    SparseMatIterator_<_Tp> it = *this;\n    SparseMatConstIterator::operator ++();\n    return it;\n}\n\n\n\n//////////////////////// MatCommaInitializer_ ///////////////////////\n\ntemplate<typename _Tp> inline\nMatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m)\n    : it(_m)\n{}\n\ntemplate<typename _Tp> template<typename T2> inline\nMatCommaInitializer_<_Tp>& MatCommaInitializer_<_Tp>::operator , (T2 v)\n{\n    CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() );\n    *this->it = _Tp(v);\n    ++this->it;\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const\n{\n    CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() );\n    return Mat_<_Tp>(*this->it.m);\n}\n\n\ntemplate<typename _Tp, typename T2> static inline\nMatCommaInitializer_<_Tp> operator << (const Mat_<_Tp>& m, T2 val)\n{\n    MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m);\n    return (commaInitializer, val);\n}\n\n\n\n///////////////////////// Matrix Expressions ////////////////////////\n\ninline\nMat& Mat::operator = (const MatExpr& e)\n{\n    e.op->assign(e, *this);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>::Mat_(const MatExpr& e)\n{\n    e.op->assign(e, *this, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nMat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e)\n{\n    e.op->assign(e, *this, DataType<_Tp>::type);\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nMatExpr Mat_<_Tp>::zeros(int rows, int cols)\n{\n    return Mat::zeros(rows, cols, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nMatExpr 
Mat_<_Tp>::zeros(Size sz)\n{\n    return Mat::zeros(sz, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nMatExpr Mat_<_Tp>::ones(int rows, int cols)\n{\n    return Mat::ones(rows, cols, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nMatExpr Mat_<_Tp>::ones(Size sz)\n{\n    return Mat::ones(sz, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nMatExpr Mat_<_Tp>::eye(int rows, int cols)\n{\n    return Mat::eye(rows, cols, DataType<_Tp>::type);\n}\n\ntemplate<typename _Tp> inline\nMatExpr Mat_<_Tp>::eye(Size sz)\n{\n    return Mat::eye(sz, DataType<_Tp>::type);\n}\n\ninline\nMatExpr::MatExpr()\n    : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s()\n{}\n\ninline\nMatExpr::MatExpr(const MatOp* _op, int _flags, const Mat& _a, const Mat& _b,\n                 const Mat& _c, double _alpha, double _beta, const Scalar& _s)\n    : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s)\n{}\n\ninline\nMatExpr::operator Mat() const\n{\n    Mat m;\n    op->assign(*this, m);\n    return m;\n}\n\ntemplate<typename _Tp> inline\nMatExpr::operator Mat_<_Tp>() const\n{\n    Mat_<_Tp> m;\n    op->assign(*this, m, DataType<_Tp>::type);\n    return m;\n}\n\n\ntemplate<typename _Tp> static inline\nMatExpr min(const Mat_<_Tp>& a, const Mat_<_Tp>& b)\n{\n    return cv::min((const Mat&)a, (const Mat&)b);\n}\n\ntemplate<typename _Tp> static inline\nMatExpr min(const Mat_<_Tp>& a, double s)\n{\n    return cv::min((const Mat&)a, s);\n}\n\ntemplate<typename _Tp> static inline\nMatExpr min(double s, const Mat_<_Tp>& a)\n{\n    return cv::min((const Mat&)a, s);\n}\n\ntemplate<typename _Tp> static inline\nMatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b)\n{\n    return cv::max((const Mat&)a, (const Mat&)b);\n}\n\ntemplate<typename _Tp> static inline\nMatExpr max(const Mat_<_Tp>& a, double s)\n{\n    return cv::max((const Mat&)a, s);\n}\n\ntemplate<typename _Tp> static inline\nMatExpr max(double s, const Mat_<_Tp>& a)\n{\n    return cv::max((const Mat&)a, s);\n}\n\ntemplate<typename _Tp> static inline\nMatExpr abs(const Mat_<_Tp>& m)\n{\n    return cv::abs((const Mat&)m);\n}\n\n\nstatic inline\nMat& operator += (Mat& a, const MatExpr& b)\n{\n    b.op->augAssignAdd(b, a);\n    return a;\n}\n\nstatic inline\nconst Mat& operator += (const Mat& a, const MatExpr& b)\n{\n    b.op->augAssignAdd(b, (Mat&)a);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nMat_<_Tp>& operator += (Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignAdd(b, a);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nconst Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignAdd(b, (Mat&)a);\n    return a;\n}\n\nstatic inline\nMat& operator -= (Mat& a, const MatExpr& b)\n{\n    b.op->augAssignSubtract(b, a);\n    return a;\n}\n\nstatic inline\nconst Mat& operator -= (const Mat& a, const MatExpr& b)\n{\n    b.op->augAssignSubtract(b, (Mat&)a);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nMat_<_Tp>& operator -= (Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignSubtract(b, a);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nconst Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignSubtract(b, (Mat&)a);\n    return a;\n}\n\nstatic inline\nMat& operator *= (Mat& a, const MatExpr& b)\n{\n    b.op->augAssignMultiply(b, a);\n    return a;\n}\n\nstatic inline\nconst Mat& operator *= (const Mat& a, const MatExpr& b)\n{\n    b.op->augAssignMultiply(b, (Mat&)a);\n    return 
a;\n}\n\ntemplate<typename _Tp> static inline\nMat_<_Tp>& operator *= (Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignMultiply(b, a);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nconst Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignMultiply(b, (Mat&)a);\n    return a;\n}\n\nstatic inline\nMat& operator /= (Mat& a, const MatExpr& b)\n{\n    b.op->augAssignDivide(b, a);\n    return a;\n}\n\nstatic inline\nconst Mat& operator /= (const Mat& a, const MatExpr& b)\n{\n    b.op->augAssignDivide(b, (Mat&)a);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nMat_<_Tp>& operator /= (Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignDivide(b, a);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nconst Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b)\n{\n    b.op->augAssignDivide(b, (Mat&)a);\n    return a;\n}\n\n\n//////////////////////////////// UMat ////////////////////////////////\n\ninline\nUMat::UMat(UMatUsageFlags _usageFlags)\n: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)\n{}\n\ninline\nUMat::UMat(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)\n: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)\n{\n    create(_rows, _cols, _type);\n}\n\ninline\nUMat::UMat(int _rows, int _cols, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)\n: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)\n{\n    create(_rows, _cols, _type);\n    *this = _s;\n}\n\ninline\nUMat::UMat(Size _sz, int _type, UMatUsageFlags _usageFlags)\n: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)\n{\n    create( _sz.height, _sz.width, _type );\n}\n\ninline\nUMat::UMat(Size _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)\n: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)\n{\n    create(_sz.height, _sz.width, _type);\n    *this = _s;\n}\n\ninline\nUMat::UMat(int _dims, const int* _sz, int _type, UMatUsageFlags _usageFlags)\n: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)\n{\n    create(_dims, _sz, _type);\n}\n\ninline\nUMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)\n: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)\n{\n    create(_dims, _sz, _type);\n    *this = _s;\n}\n\ninline\nUMat::UMat(const UMat& m)\n: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),\n  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)\n{\n    addref();\n    if( m.dims <= 2 )\n    {\n        step[0] = m.step[0]; step[1] = m.step[1];\n    }\n    else\n    {\n        dims = 0;\n        copySize(m);\n    }\n}\n\n\ntemplate<typename _Tp> inline\nUMat::UMat(const std::vector<_Tp>& vec, bool copyData)\n: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()),\ncols(1), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows)\n{\n    if(vec.empty())\n        return;\n    if( !copyData )\n    {\n        // !!!TODO!!!\n        CV_Error(Error::StsNotImplemented, \"\");\n    }\n    else\n        Mat((int)vec.size(), 1, DataType<_Tp>::type, 
(uchar*)&vec[0]).copyTo(*this);\n}\n\n\ninline\nUMat& UMat::operator = (const UMat& m)\n{\n    if( this != &m )\n    {\n        const_cast<UMat&>(m).addref();\n        release();\n        flags = m.flags;\n        if( dims <= 2 && m.dims <= 2 )\n        {\n            dims = m.dims;\n            rows = m.rows;\n            cols = m.cols;\n            step[0] = m.step[0];\n            step[1] = m.step[1];\n        }\n        else\n            copySize(m);\n        allocator = m.allocator;\n        if (usageFlags == USAGE_DEFAULT)\n            usageFlags = m.usageFlags;\n        u = m.u;\n        offset = m.offset;\n    }\n    return *this;\n}\n\ninline\nUMat UMat::row(int y) const\n{\n    return UMat(*this, Range(y, y + 1), Range::all());\n}\n\ninline\nUMat UMat::col(int x) const\n{\n    return UMat(*this, Range::all(), Range(x, x + 1));\n}\n\ninline\nUMat UMat::rowRange(int startrow, int endrow) const\n{\n    return UMat(*this, Range(startrow, endrow), Range::all());\n}\n\ninline\nUMat UMat::rowRange(const Range& r) const\n{\n    return UMat(*this, r, Range::all());\n}\n\ninline\nUMat UMat::colRange(int startcol, int endcol) const\n{\n    return UMat(*this, Range::all(), Range(startcol, endcol));\n}\n\ninline\nUMat UMat::colRange(const Range& r) const\n{\n    return UMat(*this, Range::all(), r);\n}\n\ninline\nUMat UMat::clone() const\n{\n    UMat m;\n    copyTo(m);\n    return m;\n}\n\ninline\nvoid UMat::assignTo( UMat& m, int _type ) const\n{\n    if( _type < 0 )\n        m = *this;\n    else\n        convertTo(m, _type);\n}\n\ninline\nvoid UMat::create(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)\n{\n    _type &= TYPE_MASK;\n    if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && u )\n        return;\n    int sz[] = {_rows, _cols};\n    create(2, sz, _type, _usageFlags);\n}\n\ninline\nvoid UMat::create(Size _sz, int _type, UMatUsageFlags _usageFlags)\n{\n    create(_sz.height, _sz.width, _type, _usageFlags);\n}\n\ninline\nvoid UMat::addref()\n{\n    if( u )\n        CV_XADD(&(u->urefcount), 1);\n}\n\ninline void UMat::release()\n{\n    if( u && CV_XADD(&(u->urefcount), -1) == 1 )\n        deallocate();\n    for(int i = 0; i < dims; i++)\n        size.p[i] = 0;\n    u = 0;\n}\n\ninline\nUMat UMat::operator()( Range _rowRange, Range _colRange ) const\n{\n    return UMat(*this, _rowRange, _colRange);\n}\n\ninline\nUMat UMat::operator()( const Rect& roi ) const\n{\n    return UMat(*this, roi);\n}\n\ninline\nUMat UMat::operator()(const Range* ranges) const\n{\n    return UMat(*this, ranges);\n}\n\ninline\nbool UMat::isContinuous() const\n{\n    return (flags & CONTINUOUS_FLAG) != 0;\n}\n\ninline\nbool UMat::isSubmatrix() const\n{\n    return (flags & SUBMATRIX_FLAG) != 0;\n}\n\ninline\nsize_t UMat::elemSize() const\n{\n    return dims > 0 ? 
step.p[dims - 1] : 0;\n}\n\ninline\nsize_t UMat::elemSize1() const\n{\n    return CV_ELEM_SIZE1(flags);\n}\n\ninline\nint UMat::type() const\n{\n    return CV_MAT_TYPE(flags);\n}\n\ninline\nint UMat::depth() const\n{\n    return CV_MAT_DEPTH(flags);\n}\n\ninline\nint UMat::channels() const\n{\n    return CV_MAT_CN(flags);\n}\n\ninline\nsize_t UMat::step1(int i) const\n{\n    return step.p[i] / elemSize1();\n}\n\ninline\nbool UMat::empty() const\n{\n    return u == 0 || total() == 0;\n}\n\ninline\nsize_t UMat::total() const\n{\n    if( dims <= 2 )\n        return (size_t)rows * cols;\n    size_t p = 1;\n    for( int i = 0; i < dims; i++ )\n        p *= size[i];\n    return p;\n}\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n\ninline\nUMat::UMat(UMat&& m)\n: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),\n  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)\n{\n    if (m.dims <= 2)  // move new step/size info\n    {\n        step[0] = m.step[0];\n        step[1] = m.step[1];\n    }\n    else\n    {\n        CV_DbgAssert(m.step.p != m.step.buf);\n        step.p = m.step.p;\n        size.p = m.size.p;\n        m.step.p = m.step.buf;\n        m.size.p = &m.rows;\n    }\n    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;\n    m.allocator = NULL;\n    m.u = NULL;\n    m.offset = 0;\n}\n\ninline\nUMat& UMat::operator = (UMat&& m)\n{\n    release();\n    flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols;\n    allocator = m.allocator; usageFlags = m.usageFlags;\n    u = m.u;\n    offset = m.offset;\n    if (step.p != step.buf) // release self step/size\n    {\n        fastFree(step.p);\n        step.p = step.buf;\n        size.p = &rows;\n    }\n    if (m.dims <= 2) // move new step/size info\n    {\n        step[0] = m.step[0];\n        step[1] = m.step[1];\n    }\n    else\n    {\n        CV_DbgAssert(m.step.p != m.step.buf);\n        step.p = m.step.p;\n        size.p = m.size.p;\n        m.step.p = m.step.buf;\n        m.size.p = &m.rows;\n    }\n    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;\n    m.allocator = NULL;\n    m.u = NULL;\n    m.offset = 0;\n    return *this;\n}\n\n#endif\n\n\ninline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; }\ninline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; }\ninline bool UMatData::deviceMemMapped() const { return (flags & DEVICE_MEM_MAPPED) != 0; }\ninline bool UMatData::copyOnMap() const { return (flags & COPY_ON_MAP) != 0; }\ninline bool UMatData::tempUMat() const { return (flags & TEMP_UMAT) != 0; }\ninline bool UMatData::tempCopiedUMat() const { return (flags & TEMP_COPIED_UMAT) == TEMP_COPIED_UMAT; }\n\ninline void UMatData::markDeviceMemMapped(bool flag)\n{\n  if(flag)\n    flags |= DEVICE_MEM_MAPPED;\n  else\n    flags &= ~DEVICE_MEM_MAPPED;\n}\n\ninline void UMatData::markHostCopyObsolete(bool flag)\n{\n    if(flag)\n        flags |= HOST_COPY_OBSOLETE;\n    else\n        flags &= ~HOST_COPY_OBSOLETE;\n}\ninline void UMatData::markDeviceCopyObsolete(bool flag)\n{\n    if(flag)\n        flags |= DEVICE_COPY_OBSOLETE;\n    else\n        flags &= ~DEVICE_COPY_OBSOLETE;\n}\n\ninline UMatDataAutoLock::UMatDataAutoLock(UMatData* _u) : u(_u) { u->lock(); }\ninline UMatDataAutoLock::~UMatDataAutoLock() { u->unlock(); }\n\n//! @endcond\n\n} //cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/matx.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_MATX_HPP__\n#define __OPENCV_CORE_MATX_HPP__\n\n#ifndef __cplusplus\n#  error matx.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/base.hpp\"\n#include \"opencv2/core/traits.hpp\"\n#include \"opencv2/core/saturate.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup core_basic\n//! @{\n\n////////////////////////////// Small Matrix ///////////////////////////\n\n//! @cond IGNORED\nstruct CV_EXPORTS Matx_AddOp {};\nstruct CV_EXPORTS Matx_SubOp {};\nstruct CV_EXPORTS Matx_ScaleOp {};\nstruct CV_EXPORTS Matx_MulOp {};\nstruct CV_EXPORTS Matx_DivOp {};\nstruct CV_EXPORTS Matx_MatMulOp {};\nstruct CV_EXPORTS Matx_TOp {};\n//! @endcond\n\n/** @brief Template class for small matrices whose type and size are known at compilation time\n\nIf you need a more flexible type, use Mat . The elements of the matrix M are accessible using the\nM(i,j) notation. Most of the common matrix operations (see also @ref MatrixExpressions ) are\navailable. 
To do an operation on Matx that is not implemented, you can easily convert the matrix to\nMat and backwards:\n@code\n    Matx33f m(1, 2, 3,\n              4, 5, 6,\n              7, 8, 9);\n    cout << sum(Mat(m*m.t())) << endl;\n @endcode\n */\ntemplate<typename _Tp, int m, int n> class Matx\n{\npublic:\n    enum { depth    = DataType<_Tp>::depth,\n           rows     = m,\n           cols     = n,\n           channels = rows*cols,\n           type     = CV_MAKETYPE(depth, channels),\n           shortdim = (m < n ? m : n)\n         };\n\n    typedef _Tp                           value_type;\n    typedef Matx<_Tp, m, n>               mat_type;\n    typedef Matx<_Tp, shortdim, 1> diag_type;\n\n    //! default constructor\n    Matx();\n\n    Matx(_Tp v0); //!< 1x1 matrix\n    Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,\n         _Tp v4, _Tp v5, _Tp v6, _Tp v7,\n         _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,\n         _Tp v4, _Tp v5, _Tp v6, _Tp v7,\n         _Tp v8, _Tp v9, _Tp v10, _Tp v11,\n         _Tp v12, _Tp v13); //!< 1x14, 2x7, 7x2 or 14x1 matrix\n    Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,\n         _Tp v4, _Tp v5, _Tp v6, _Tp v7,\n         _Tp v8, _Tp v9, _Tp v10, _Tp v11,\n         _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix\n    explicit Matx(const _Tp* vals); //!< initialize from a plain array\n\n    static Matx all(_Tp alpha);\n    static Matx zeros();\n    static Matx ones();\n    static Matx eye();\n    static Matx diag(const diag_type& d);\n    static Matx randu(_Tp a, _Tp b);\n    static Matx randn(_Tp a, _Tp b);\n\n    //! dot product computed with the default precision\n    _Tp dot(const Matx<_Tp, m, n>& v) const;\n\n    //! dot product computed in double-precision arithmetics\n    double ddot(const Matx<_Tp, m, n>& v) const;\n\n    //! conversion to another data type\n    template<typename T2> operator Matx<T2, m, n>() const;\n\n    //! change the matrix shape\n    template<int m1, int n1> Matx<_Tp, m1, n1> reshape() const;\n\n    //! extract part of the matrix\n    template<int m1, int n1> Matx<_Tp, m1, n1> get_minor(int i, int j) const;\n\n    //! extract the matrix row\n    Matx<_Tp, 1, n> row(int i) const;\n\n    //! extract the matrix column\n    Matx<_Tp, m, 1> col(int i) const;\n\n    //! extract the matrix diagonal\n    diag_type diag() const;\n\n    //! transpose the matrix\n    Matx<_Tp, n, m> t() const;\n\n    //! invert the matrix\n    Matx<_Tp, n, m> inv(int method=DECOMP_LU, bool *p_is_ok = NULL) const;\n\n    //! 
solve linear system\n    template<int l> Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const;\n    Vec<_Tp, n> solve(const Vec<_Tp, m>& rhs, int method) const;\n\n    //! multiply two matrices element-wise\n    Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const;\n\n    //! divide two matrices element-wise\n    Matx<_Tp, m, n> div(const Matx<_Tp, m, n>& a) const;\n\n    //! element access\n    const _Tp& operator ()(int i, int j) const;\n    _Tp& operator ()(int i, int j);\n\n    //! 1D element access\n    const _Tp& operator ()(int i) const;\n    _Tp& operator ()(int i);\n\n    Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp);\n    Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp);\n    template<typename _T2> Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp);\n    Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp);\n    Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_DivOp);\n    template<int l> Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp);\n    Matx(const Matx<_Tp, n, m>& a, Matx_TOp);\n\n    _Tp val[m*n]; //< matrix elements\n};\n\ntypedef Matx<float, 1, 2> Matx12f;\ntypedef Matx<double, 1, 2> Matx12d;\ntypedef Matx<float, 1, 3> Matx13f;\ntypedef Matx<double, 1, 3> Matx13d;\ntypedef Matx<float, 1, 4> Matx14f;\ntypedef Matx<double, 1, 4> Matx14d;\ntypedef Matx<float, 1, 6> Matx16f;\ntypedef Matx<double, 1, 6> Matx16d;\n\ntypedef Matx<float, 2, 1> Matx21f;\ntypedef Matx<double, 2, 1> Matx21d;\ntypedef Matx<float, 3, 1> Matx31f;\ntypedef Matx<double, 3, 1> Matx31d;\ntypedef Matx<float, 4, 1> Matx41f;\ntypedef Matx<double, 4, 1> Matx41d;\ntypedef Matx<float, 6, 1> Matx61f;\ntypedef Matx<double, 6, 1> Matx61d;\n\ntypedef Matx<float, 2, 2> Matx22f;\ntypedef Matx<double, 2, 2> Matx22d;\ntypedef Matx<float, 2, 3> Matx23f;\ntypedef Matx<double, 2, 3> Matx23d;\ntypedef Matx<float, 3, 2> Matx32f;\ntypedef Matx<double, 3, 2> Matx32d;\n\ntypedef Matx<float, 3, 3> Matx33f;\ntypedef Matx<double, 3, 3> Matx33d;\n\ntypedef Matx<float, 3, 4> Matx34f;\ntypedef Matx<double, 3, 4> Matx34d;\ntypedef Matx<float, 4, 3> Matx43f;\ntypedef Matx<double, 4, 3> Matx43d;\n\ntypedef Matx<float, 4, 4> Matx44f;\ntypedef Matx<double, 4, 4> Matx44d;\ntypedef Matx<float, 6, 6> Matx66f;\ntypedef Matx<double, 6, 6> Matx66d;\n\n/*!\n  traits\n*/\ntemplate<typename _Tp, int m, int n> class DataType< Matx<_Tp, m, n> >\n{\npublic:\n    typedef Matx<_Tp, m, n>                               value_type;\n    typedef Matx<typename DataType<_Tp>::work_type, m, n> work_type;\n    typedef _Tp                                           channel_type;\n    typedef value_type                                    vec_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = m * n,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\n/** @brief  Comma-separated Matrix Initializer\n*/\ntemplate<typename _Tp, int m, int n> class MatxCommaInitializer\n{\npublic:\n    MatxCommaInitializer(Matx<_Tp, m, n>* _mtx);\n    template<typename T2> MatxCommaInitializer<_Tp, m, n>& operator , (T2 val);\n    Matx<_Tp, m, n> operator *() const;\n\n    Matx<_Tp, m, n>* dst;\n    int idx;\n};\n\n/*\n Utility methods\n*/\ntemplate<typename _Tp, int m> static double determinant(const Matx<_Tp, m, m>& a);\ntemplate<typename _Tp, int m, int n> static double trace(const 
Matx<_Tp, m, n>& a);\ntemplate<typename _Tp, int m, int n> static double norm(const Matx<_Tp, m, n>& M);\ntemplate<typename _Tp, int m, int n> static double norm(const Matx<_Tp, m, n>& M, int normType);\n\n\n\n/////////////////////// Vec (used as element of multi-channel images) /////////////////////\n\n/** @brief Template class for short numerical vectors, a special case of Matx\n\nThis template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) on which you\ncan perform basic arithmetical operations, access individual elements using the [] operator, etc. The\nvectors are allocated on the stack, as opposed to std::valarray, std::vector, cv::Mat, etc., whose\nelements are dynamically allocated on the heap.\n\nThe template takes 2 parameters:\n@tparam _Tp element type\n@tparam cn the number of elements\n\nIn addition to the universal notation like Vec<float, 3>, you can use shorter aliases\nfor the most popular specialized variants of Vec, e.g. Vec3f ~ Vec<float, 3>.\n\nIt is possible to convert Vec\\<T,2\\> to/from Point_, Vec\\<T,3\\> to/from Point3_ , and Vec\\<T,4\\>\nto CvScalar or Scalar_. Use operator[] to access the elements of Vec.\n\nAll the expected vector operations are also implemented:\n-   v1 = v2 + v3\n-   v1 = v2 - v3\n-   v1 = v2 \\* scale\n-   v1 = scale \\* v2\n-   v1 = -v2\n-   v1 += v2 and other augmenting operations\n-   v1 == v2, v1 != v2\n-   norm(v1) (Euclidean norm)\nThe Vec class is commonly used to describe pixel types of multi-channel arrays. See Mat for details.\n*/\ntemplate<typename _Tp, int cn> class Vec : public Matx<_Tp, cn, 1>\n{\npublic:\n    typedef _Tp value_type;\n    enum { depth    = Matx<_Tp, cn, 1>::depth,\n           channels = cn,\n           type     = CV_MAKETYPE(depth, channels)\n         };\n\n    //! default constructor\n    Vec();\n\n    Vec(_Tp v0); //!< 1-element vector constructor\n    Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor\n    Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13); //!< 14-element vector constructor\n    explicit Vec(const _Tp* values);\n\n    Vec(const Vec<_Tp, cn>& v);\n\n    static Vec all(_Tp alpha);\n\n    //! per-element multiplication\n    Vec mul(const Vec<_Tp, cn>& v) const;\n\n    //! conjugation (makes sense for complex numbers and quaternions)\n    Vec conj() const;\n\n    /*!\n      cross product of the two 3D vectors.\n\n      For other dimensionalities an exception is raised\n    */\n    Vec cross(const Vec& v) const;\n    //! conversion to another data type\n    template<typename T2> operator Vec<T2, cn>() const;\n\n    /*! 
element access */\n    const _Tp& operator [](int i) const;\n    _Tp& operator[](int i);\n    const _Tp& operator ()(int i) const;\n    _Tp& operator ()(int i);\n\n    Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp);\n    Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp);\n    template<typename _T2> Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp);\n};\n\n/** @name Shorter aliases for the most popular specializations of Vec<T,n>\n  @{\n*/\ntypedef Vec<uchar, 2> Vec2b;\ntypedef Vec<uchar, 3> Vec3b;\ntypedef Vec<uchar, 4> Vec4b;\n\ntypedef Vec<short, 2> Vec2s;\ntypedef Vec<short, 3> Vec3s;\ntypedef Vec<short, 4> Vec4s;\n\ntypedef Vec<ushort, 2> Vec2w;\ntypedef Vec<ushort, 3> Vec3w;\ntypedef Vec<ushort, 4> Vec4w;\n\ntypedef Vec<int, 2> Vec2i;\ntypedef Vec<int, 3> Vec3i;\ntypedef Vec<int, 4> Vec4i;\ntypedef Vec<int, 6> Vec6i;\ntypedef Vec<int, 8> Vec8i;\n\ntypedef Vec<float, 2> Vec2f;\ntypedef Vec<float, 3> Vec3f;\ntypedef Vec<float, 4> Vec4f;\ntypedef Vec<float, 6> Vec6f;\n\ntypedef Vec<double, 2> Vec2d;\ntypedef Vec<double, 3> Vec3d;\ntypedef Vec<double, 4> Vec4d;\ntypedef Vec<double, 6> Vec6d;\n/** @} */\n\n/*!\n  traits\n*/\ntemplate<typename _Tp, int cn> class DataType< Vec<_Tp, cn> >\n{\npublic:\n    typedef Vec<_Tp, cn>                               value_type;\n    typedef Vec<typename DataType<_Tp>::work_type, cn> work_type;\n    typedef _Tp                                        channel_type;\n    typedef value_type                                 vec_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = cn,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\n/** @brief  Comma-separated Vec Initializer\n*/\ntemplate<typename _Tp, int m> class VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1>\n{\npublic:\n    VecCommaInitializer(Vec<_Tp, m>* _vec);\n    template<typename T2> VecCommaInitializer<_Tp, m>& operator , (T2 val);\n    Vec<_Tp, m> operator *() const;\n};\n\ntemplate<typename _Tp, int cn> static Vec<_Tp, cn> normalize(const Vec<_Tp, cn>& v);\n\n//! @} core_basic\n\n//! 
@cond IGNORED\n\n///////////////////////////////////// helper classes /////////////////////////////////////\nnamespace internal\n{\n\ntemplate<typename _Tp, int m> struct Matx_DetOp\n{\n    double operator ()(const Matx<_Tp, m, m>& a) const\n    {\n        Matx<_Tp, m, m> temp = a;\n        double p = LU(temp.val, m*sizeof(_Tp), m, 0, 0, 0);\n        if( p == 0 )\n            return p;\n        for( int i = 0; i < m; i++ )\n            p *= temp(i, i);\n        return 1./p;\n    }\n};\n\ntemplate<typename _Tp> struct Matx_DetOp<_Tp, 1>\n{\n    double operator ()(const Matx<_Tp, 1, 1>& a) const\n    {\n        return a(0,0);\n    }\n};\n\ntemplate<typename _Tp> struct Matx_DetOp<_Tp, 2>\n{\n    double operator ()(const Matx<_Tp, 2, 2>& a) const\n    {\n        return a(0,0)*a(1,1) - a(0,1)*a(1,0);\n    }\n};\n\ntemplate<typename _Tp> struct Matx_DetOp<_Tp, 3>\n{\n    double operator ()(const Matx<_Tp, 3, 3>& a) const\n    {\n        return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) -\n            a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) +\n            a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1));\n    }\n};\n\ntemplate<typename _Tp> Vec<_Tp, 2> inline conjugate(const Vec<_Tp, 2>& v)\n{\n    return Vec<_Tp, 2>(v[0], -v[1]);\n}\n\ntemplate<typename _Tp> Vec<_Tp, 4> inline conjugate(const Vec<_Tp, 4>& v)\n{\n    return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]);\n}\n\n} // internal\n\n\n\n////////////////////////////////// Matx Implementation ///////////////////////////////////\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx()\n{\n    for(int i = 0; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0)\n{\n    val[0] = v0;\n    for(int i = 1; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1)\n{\n    CV_StaticAssert(channels >= 2, \"Matx should have at least 2 elements.\");\n    val[0] = v0; val[1] = v1;\n    for(int i = 2; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2)\n{\n    CV_StaticAssert(channels >= 3, \"Matx should have at least 3 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2;\n    for(int i = 3; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3)\n{\n    CV_StaticAssert(channels >= 4, \"Matx should have at least 4 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    for(int i = 4; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4)\n{\n    CV_StaticAssert(channels >= 5, \"Matx should have at least 5 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4;\n    for(int i = 5; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5)\n{\n    CV_StaticAssert(channels >= 6, \"Matx should have at least 6 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    val[4] = v4; val[5] = v5;\n    for(int i = 6; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6)\n{\n    CV_StaticAssert(channels >= 7, \"Matx should have at least 7 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n   
 val[4] = v4; val[5] = v5; val[6] = v6;\n    for(int i = 7; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7)\n{\n    CV_StaticAssert(channels >= 8, \"Matx should have at least 8 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;\n    for(int i = 8; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8)\n{\n    CV_StaticAssert(channels >= 9, \"Matx should have at least 9 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;\n    val[8] = v8;\n    for(int i = 9; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9)\n{\n    CV_StaticAssert(channels >= 10, \"Matx should have at least 10 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;\n    val[8] = v8; val[9] = v9;\n    for(int i = 10; i < channels; i++) val[i] = _Tp(0);\n}\n\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11)\n{\n    CV_StaticAssert(channels >= 12, \"Matx should have at least 12 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;\n    val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;\n    for(int i = 12; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13)\n{\n    CV_StaticAssert(channels == 14, \"Matx should have exactly 14 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;\n    val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;\n    val[12] = v12; val[13] = v13;\n}\n\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13, _Tp v14, _Tp v15)\n{\n    CV_StaticAssert(channels >= 16, \"Matx should have at least 16 elements.\");\n    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;\n    val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;\n    val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;\n    val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15;\n    for(int i = 16; i < channels; i++) val[i] = _Tp(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n>::Matx(const _Tp* values)\n{\n    for( int i = 0; i < channels; i++ ) val[i] = values[i];\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha)\n{\n    Matx<_Tp, m, n> M;\n    for( int i = 0; i < m*n; i++ ) M.val[i] = alpha;\n    return M;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n> Matx<_Tp,m,n>::zeros()\n{\n    return all(0);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n> Matx<_Tp,m,n>::ones()\n{\n    return all(1);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n> Matx<_Tp,m,n>::eye()\n{\n    Matx<_Tp,m,n> M;\n    for(int i = 0; i < shortdim; i++)\n        M(i,i) = 1;\n    return M;\n}\n\ntemplate<typename _Tp, int m, int n> inline\n_Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const\n{\n    _Tp s = 0;\n    for( int i = 0; i < channels; i++ ) s += val[i]*M.val[i];\n    return s;\n}\n\ntemplate<typename _Tp, int m, int n> inline\ndouble Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const\n{\n    double s = 0;\n    for( int i = 0; i < channels; i++ ) s += (double)val[i]*M.val[i];\n    return s;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n> Matx<_Tp,m,n>::diag(const typename Matx<_Tp,m,n>::diag_type& d)\n{\n    Matx<_Tp,m,n> M;\n    for(int i = 0; i < shortdim; i++)\n        M(i,i) = d(i, 0);\n    return M;\n}\n\ntemplate<typename _Tp, int m, int n> template<typename T2>\ninline Matx<_Tp, m, n>::operator Matx<T2, m, n>() const\n{\n    Matx<T2, m, n> M;\n    for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast<T2>(val[i]);\n    return M;\n}\n\ntemplate<typename _Tp, int m, int n> template<int m1, int n1> inline\nMatx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const\n{\n    CV_StaticAssert(m1*n1 == m*n, \"Input and destination matrices must have the same number of elements\");\n    return (const Matx<_Tp, m1, n1>&)*this;\n}\n\ntemplate<typename _Tp, int m, int n>\ntemplate<int m1, int n1> inline\nMatx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const\n{\n    CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n);\n    Matx<_Tp, m1, n1> s;\n    for( int di = 0; di < m1; di++ )\n        for( int dj = 0; dj < n1; dj++ )\n            s(di, dj) = (*this)(i+di, j+dj);\n    return s;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const\n{\n    CV_DbgAssert((unsigned)i < (unsigned)m);\n    return Matx<_Tp, 1, n>(&val[i*n]);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const\n{\n    CV_DbgAssert((unsigned)j < (unsigned)n);\n    Matx<_Tp, m, 1> v;\n    for( int i = 0; i < m; i++ )\n        v.val[i] = val[i*n + j];\n    return v;\n}\n\ntemplate<typename _Tp, int m, int n> inline\ntypename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const\n{\n    diag_type d;\n    for( int i = 0; i < shortdim; i++ )\n        d.val[i] = val[i*n + i];\n    return d;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nconst _Tp& Matx<_Tp, m, n>::operator()(int i, int j) const\n{\n    CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n );\n    return this->val[i*n + j];\n}\n\ntemplate<typename _Tp, int m, int n> inline\n_Tp& Matx<_Tp, m, n>::operator ()(int i, int j)\n{\n    CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n );\n    return val[i*n + j];\n}\n\ntemplate<typename _Tp, int m, int n> inline\nconst _Tp& Matx<_Tp, m, n>::operator ()(int i) const\n{\n    CV_StaticAssert(m == 1 || n == 1, \"Single index indexation requires matrix to be a column or a row\");\n    CV_DbgAssert( (unsigned)i < (unsigned)(m+n-1) );\n    return val[i];\n}\n\ntemplate<typename _Tp, int m, int n> inline\n_Tp& Matx<_Tp, m, n>::operator ()(int i)\n{\n    CV_StaticAssert(m == 1 || n == 1, \"Single index indexation requires matrix to be a column or a row\");\n    CV_DbgAssert( (unsigned)i < (unsigned)(m+n-1) );\n    return val[i];\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp)\n{\n    for( int i = 0; i < channels; i++ )\n        val[i] = 
saturate_cast<_Tp>(a.val[i] + b.val[i]);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp)\n{\n    for( int i = 0; i < channels; i++ )\n        val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]);\n}\n\ntemplate<typename _Tp, int m, int n> template<typename _T2> inline\nMatx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp)\n{\n    for( int i = 0; i < channels; i++ )\n        val[i] = saturate_cast<_Tp>(a.val[i] * alpha);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp)\n{\n    for( int i = 0; i < channels; i++ )\n        val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_DivOp)\n{\n    for( int i = 0; i < channels; i++ )\n        val[i] = saturate_cast<_Tp>(a.val[i] / b.val[i]);\n}\n\ntemplate<typename _Tp, int m, int n> template<int l> inline\nMatx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp)\n{\n    for( int i = 0; i < m; i++ )\n        for( int j = 0; j < n; j++ )\n        {\n            _Tp s = 0;\n            for( int k = 0; k < l; k++ )\n                s += a(i, k) * b(k, j);\n            val[i*n + j] = s;\n        }\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp)\n{\n    for( int i = 0; i < m; i++ )\n        for( int j = 0; j < n; j++ )\n            val[i*n + j] = a(j, i);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const\n{\n    return Matx<_Tp, m, n>(*this, a, Matx_MulOp());\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n> Matx<_Tp, m, n>::div(const Matx<_Tp, m, n>& a) const\n{\n    return Matx<_Tp, m, n>(*this, a, Matx_DivOp());\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, n, m> Matx<_Tp, m, n>::t() const\n{\n    return Matx<_Tp, n, m>(*this, Matx_TOp());\n}\n\ntemplate<typename _Tp, int m, int n> inline\nVec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const\n{\n    Matx<_Tp, n, 1> x = solve((const Matx<_Tp, m, 1>&)(rhs), method);\n    return (Vec<_Tp, n>&)(x);\n}\n\ntemplate<typename _Tp, int m> static inline\ndouble determinant(const Matx<_Tp, m, m>& a)\n{\n    return cv::internal::Matx_DetOp<_Tp, m>()(a);\n}\n\ntemplate<typename _Tp, int m, int n> static inline\ndouble trace(const Matx<_Tp, m, n>& a)\n{\n    _Tp s = 0;\n    for( int i = 0; i < std::min(m, n); i++ )\n        s += a(i,i);\n    return s;\n}\n\ntemplate<typename _Tp, int m, int n> static inline\ndouble norm(const Matx<_Tp, m, n>& M)\n{\n    return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n));\n}\n\ntemplate<typename _Tp, int m, int n> static inline\ndouble norm(const Matx<_Tp, m, n>& M, int normType)\n{\n    switch(normType) {\n    case NORM_INF:\n        return (double)normInf<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n);\n    case NORM_L1:\n        return (double)normL1<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n);\n    case NORM_L2SQR:\n        return (double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n);\n    default:\n    case NORM_L2:\n        return std::sqrt((double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n));\n    }\n}\n\n\n\n//////////////////////////////// matx comma initializer 
//////////////////////////////////\n\ntemplate<typename _Tp, typename _T2, int m, int n> static inline\nMatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val)\n{\n    MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx);\n    return (commaInitializer, val);\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* _mtx)\n    : dst(_mtx), idx(0)\n{}\n\ntemplate<typename _Tp, int m, int n> template<typename _T2> inline\nMatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value)\n{\n    CV_DbgAssert( idx < m*n );\n    dst->val[idx++] = saturate_cast<_Tp>(value);\n    return *this;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const\n{\n    CV_DbgAssert( idx == n*m );\n    return *dst;\n}\n\n\n\n/////////////////////////////////// Vec Implementation ///////////////////////////////////\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec() {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0)\n    : Matx<_Tp, cn, 1>(v0) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1)\n    : Matx<_Tp, cn, 1>(v0, v1) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2)\n    : Matx<_Tp, cn, 1>(v0, v1, v2) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13)\n    : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(const _Tp* values)\n    : Matx<_Tp, cn, 1>(values) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m)\n    : Matx<_Tp, cn, 1>(m.val) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op)\n    : Matx<_Tp, cn, 1>(a, b, op) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op)\n    : Matx<_Tp, cn, 1>(a, b, op) {}\n\ntemplate<typename _Tp, int cn> template<typename _T2> inline\nVec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp 
op)\n    : Matx<_Tp, cn, 1>(a, alpha, op) {}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha)\n{\n    Vec v;\n    for( int i = 0; i < cn; i++ ) v.val[i] = alpha;\n    return v;\n}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const\n{\n    Vec<_Tp, cn> w;\n    for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]);\n    return w;\n}\n\ntemplate<> inline\nVec<float, 2> Vec<float, 2>::conj() const\n{\n    return cv::internal::conjugate(*this);\n}\n\ntemplate<> inline\nVec<double, 2> Vec<double, 2>::conj() const\n{\n    return cv::internal::conjugate(*this);\n}\n\ntemplate<> inline\nVec<float, 4> Vec<float, 4>::conj() const\n{\n    return cv::internal::conjugate(*this);\n}\n\ntemplate<> inline\nVec<double, 4> Vec<double, 4>::conj() const\n{\n    return cv::internal::conjugate(*this);\n}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>&) const\n{\n    CV_StaticAssert(cn == 3, \"for arbitrary-size vector there is no cross-product defined\");\n    return Vec<_Tp, cn>();\n}\n\ntemplate<> inline\nVec<float, 3> Vec<float, 3>::cross(const Vec<float, 3>& v) const\n{\n    return Vec<float,3>(val[1]*v.val[2] - val[2]*v.val[1],\n                     val[2]*v.val[0] - val[0]*v.val[2],\n                     val[0]*v.val[1] - val[1]*v.val[0]);\n}\n\ntemplate<> inline\nVec<double, 3> Vec<double, 3>::cross(const Vec<double, 3>& v) const\n{\n    return Vec<double,3>(val[1]*v.val[2] - val[2]*v.val[1],\n                     val[2]*v.val[0] - val[0]*v.val[2],\n                     val[0]*v.val[1] - val[1]*v.val[0]);\n}\n\ntemplate<typename _Tp, int cn> template<typename T2> inline\nVec<_Tp, cn>::operator Vec<T2, cn>() const\n{\n    Vec<T2, cn> v;\n    for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast<T2>(this->val[i]);\n    return v;\n}\n\ntemplate<typename _Tp, int cn> inline\nconst _Tp& Vec<_Tp, cn>::operator [](int i) const\n{\n    CV_DbgAssert( (unsigned)i < (unsigned)cn );\n    return this->val[i];\n}\n\ntemplate<typename _Tp, int cn> inline\n_Tp& Vec<_Tp, cn>::operator [](int i)\n{\n    CV_DbgAssert( (unsigned)i < (unsigned)cn );\n    return this->val[i];\n}\n\ntemplate<typename _Tp, int cn> inline\nconst _Tp& Vec<_Tp, cn>::operator ()(int i) const\n{\n    CV_DbgAssert( (unsigned)i < (unsigned)cn );\n    return this->val[i];\n}\n\ntemplate<typename _Tp, int cn> inline\n_Tp& Vec<_Tp, cn>::operator ()(int i)\n{\n    CV_DbgAssert( (unsigned)i < (unsigned)cn );\n    return this->val[i];\n}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn> normalize(const Vec<_Tp, cn>& v)\n{\n    double nv = norm(v);\n    return v * (nv ? 
1./nv : 0.);\n}\n\n\n\n//////////////////////////////// Vec comma initializer //////////////////////////////////\n\n\ntemplate<typename _Tp, typename _T2, int cn> static inline\nVecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val)\n{\n    VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec);\n    return (commaInitializer, val);\n}\n\ntemplate<typename _Tp, int cn> inline\nVecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec)\n    : MatxCommaInitializer<_Tp, cn, 1>(_vec)\n{}\n\ntemplate<typename _Tp, int cn> template<typename _T2> inline\nVecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value)\n{\n    CV_DbgAssert( this->idx < cn );\n    this->dst->val[this->idx++] = saturate_cast<_Tp>(value);\n    return *this;\n}\n\ntemplate<typename _Tp, int cn> inline\nVec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const\n{\n    CV_DbgAssert( this->idx == cn );\n    return *this->dst;\n}\n\n//! @endcond\n\n///////////////////////////// Matx out-of-class operators ////////////////////////////////\n\n//! @relates cv::Matx\n//! @{\n\ntemplate<typename _Tp1, typename _Tp2, int m, int n> static inline\nMatx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)\n{\n    for( int i = 0; i < m*n; i++ )\n        a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);\n    return a;\n}\n\ntemplate<typename _Tp1, typename _Tp2, int m, int n> static inline\nMatx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)\n{\n    for( int i = 0; i < m*n; i++ )\n        a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);\n    return a;\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)\n{\n    return Matx<_Tp, m, n>(a, b, Matx_AddOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)\n{\n    return Matx<_Tp, m, n>(a, b, Matx_SubOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha)\n{\n    for( int i = 0; i < m*n; i++ )\n        a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);\n    return a;\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha)\n{\n    for( int i = 0; i < m*n; i++ )\n        a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);\n    return a;\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha)\n{\n    for( int i = 0; i < m*n; i++ )\n        a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);\n    return a;\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha)\n{\n    return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha)\n{\n    return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha)\n{\n    return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a)\n{\n    return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a)\n{\n    return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a)\n{\n    return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nMatx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a)\n{\n    return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int m, int n, int l> static inline\nMatx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b)\n{\n    return Matx<_Tp, m, n>(a, b, Matx_MatMulOp());\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nVec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b)\n{\n    Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp());\n    return (const Vec<_Tp, m>&)(c);\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nbool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)\n{\n    for( int i = 0; i < m*n; i++ )\n        if( a.val[i] != b.val[i] ) return false;\n    return true;\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nbool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)\n{\n    return !(a == b);\n}\n\n//! @}\n\n////////////////////////////// Vec out-of-class operators ////////////////////////////////\n\n//! @relates cv::Vec\n//! @{\n\ntemplate<typename _Tp1, typename _Tp2, int cn> static inline\nVec<_Tp1, cn>& operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)\n{\n    for( int i = 0; i < cn; i++ )\n        a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);\n    return a;\n}\n\ntemplate<typename _Tp1, typename _Tp2, int cn> static inline\nVec<_Tp1, cn>& operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)\n{\n    for( int i = 0; i < cn; i++ )\n        a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);\n    return a;\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b)\n{\n    return Vec<_Tp, cn>(a, b, Matx_AddOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b)\n{\n    return Vec<_Tp, cn>(a, b, Matx_SubOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha)\n{\n    for( int i = 0; i < cn; i++ )\n        a[i] = saturate_cast<_Tp>(a[i]*alpha);\n    return a;\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha)\n{\n    for( int i = 0; i < cn; i++ )\n        a[i] = saturate_cast<_Tp>(a[i]*alpha);\n    return a;\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha)\n{\n    for( int i = 0; i < cn; i++ )\n        a[i] = saturate_cast<_Tp>(a[i]*alpha);\n    return a;\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, int alpha)\n{\n    double ialpha = 1./alpha;\n    for( int i = 0; i < cn; i++ )\n        a[i] = saturate_cast<_Tp>(a[i]*ialpha);\n    return a;\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha)\n{\n    float ialpha = 1.f/alpha;\n    for( int i = 0; i < cn; i++ )\n        a[i] = saturate_cast<_Tp>(a[i]*ialpha);\n    return a;\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, double alpha)\n{\n    double ialpha = 1./alpha;\n    for( int i = 0; i < cn; i++ )\n        a[i] = 
saturate_cast<_Tp>(a[i]*ialpha);\n    return a;\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator * (const Vec<_Tp, cn>& a, int alpha)\n{\n    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator * (int alpha, const Vec<_Tp, cn>& a)\n{\n    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator * (const Vec<_Tp, cn>& a, float alpha)\n{\n    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator * (float alpha, const Vec<_Tp, cn>& a)\n{\n    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator * (const Vec<_Tp, cn>& a, double alpha)\n{\n    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator * (double alpha, const Vec<_Tp, cn>& a)\n{\n    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator / (const Vec<_Tp, cn>& a, int alpha)\n{\n    return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator / (const Vec<_Tp, cn>& a, float alpha)\n{\n    return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator / (const Vec<_Tp, cn>& a, double alpha)\n{\n    return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());\n}\n\ntemplate<typename _Tp, int cn> static inline\nVec<_Tp, cn> operator - (const Vec<_Tp, cn>& a)\n{\n    Vec<_Tp,cn> t;\n    for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]);\n    return t;\n}\n\ntemplate<typename _Tp> inline Vec<_Tp, 4> operator * (const Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)\n{\n    return Vec<_Tp, 4>(saturate_cast<_Tp>(v1[0]*v2[0] - v1[1]*v2[1] - v1[2]*v2[2] - v1[3]*v2[3]),\n                       saturate_cast<_Tp>(v1[0]*v2[1] + v1[1]*v2[0] + v1[2]*v2[3] - v1[3]*v2[2]),\n                       saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]),\n                       saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0]));\n}\n\ntemplate<typename _Tp> inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)\n{\n    v1 = v1 * v2;\n    return v1;\n}\n\n//! @}\n\n} // cv\n\n#endif // __OPENCV_CORE_MATX_HPP__\n"
  },
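  {
    "path": "docs/examples/matx_usage_example.cpp",
    "content": "// NOTE: illustrative sketch only -- this file and its path are hypothetical\n// additions for documentation, not part of upstream OpenCV or this project.\n// It exercises the cv::Matx / cv::Vec API implemented in\n// opencv2/core/matx.hpp: the comma initializer, determinant(), the\n// matrix-vector product and the quaternion-style product for Vec<_Tp, 4>.\n#include <iostream>\n#include \"opencv2/core.hpp\"\n\nint main()\n{\n    // operator<< returns a MatxCommaInitializer that fills val[] in order.\n    cv::Matx22f A;\n    A << 1.f, 2.f,\n         3.f, 4.f;\n\n    // determinant() dispatches to internal::Matx_DetOp; for a 2x2 matrix\n    // it computes a(0,0)*a(1,1) - a(0,1)*a(1,0) = -2 here.\n    std::cout << \"det(A) = \" << cv::determinant(A) << std::endl;\n\n    // Matx * Vec uses the Matx_MatMulOp constructor: y = (3, 7).\n    cv::Vec2f x(1.f, 1.f);\n    cv::Vec2f y = A * x;\n    std::cout << \"A*x = (\" << y[0] << \", \" << y[1] << \")\" << std::endl;\n\n    // For Vec<_Tp, 4>, operator* is the quaternion (Hamilton) product with\n    // v[0] as the scalar part, so i * j = k = (0, 0, 0, 1).\n    cv::Vec4f q1(0.f, 1.f, 0.f, 0.f), q2(0.f, 0.f, 1.f, 0.f);\n    cv::Vec4f q = q1 * q2;\n    std::cout << \"q1*q2 = (\" << q[0] << \", \" << q[1] << \", \" << q[2] << \", \" << q[3] << \")\" << std::endl;\n    return 0;\n}\n"
  },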
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/neon_utils.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HAL_NEON_UTILS_HPP__\n#define __OPENCV_HAL_NEON_UTILS_HPP__\n\n#include \"opencv2/core/cvdef.h\"\n\n//! @addtogroup core_utils_neon\n//! 
@{\n\n#if CV_NEON\n\ninline int32x2_t cv_vrnd_s32_f32(float32x2_t v)\n{\n    static int32x2_t v_sign = vdup_n_s32(1 << 31),\n        v_05 = vreinterpret_s32_f32(vdup_n_f32(0.5f));\n\n    int32x2_t v_addition = vorr_s32(v_05, vand_s32(v_sign, vreinterpret_s32_f32(v)));\n    return vcvt_s32_f32(vadd_f32(v, vreinterpret_f32_s32(v_addition)));\n}\n\ninline int32x4_t cv_vrndq_s32_f32(float32x4_t v)\n{\n    static int32x4_t v_sign = vdupq_n_s32(1 << 31),\n        v_05 = vreinterpretq_s32_f32(vdupq_n_f32(0.5f));\n\n    int32x4_t v_addition = vorrq_s32(v_05, vandq_s32(v_sign, vreinterpretq_s32_f32(v)));\n    return vcvtq_s32_f32(vaddq_f32(v, vreinterpretq_f32_s32(v_addition)));\n}\n\ninline uint32x2_t cv_vrnd_u32_f32(float32x2_t v)\n{\n    static float32x2_t v_05 = vdup_n_f32(0.5f);\n    return vcvt_u32_f32(vadd_f32(v, v_05));\n}\n\ninline uint32x4_t cv_vrndq_u32_f32(float32x4_t v)\n{\n    static float32x4_t v_05 = vdupq_n_f32(0.5f);\n    return vcvtq_u32_f32(vaddq_f32(v, v_05));\n}\n\ninline float32x4_t cv_vrecpq_f32(float32x4_t val)\n{\n    float32x4_t reciprocal = vrecpeq_f32(val);\n    reciprocal = vmulq_f32(vrecpsq_f32(val, reciprocal), reciprocal);\n    reciprocal = vmulq_f32(vrecpsq_f32(val, reciprocal), reciprocal);\n    return reciprocal;\n}\n\ninline float32x2_t cv_vrecp_f32(float32x2_t val)\n{\n    float32x2_t reciprocal = vrecpe_f32(val);\n    reciprocal = vmul_f32(vrecps_f32(val, reciprocal), reciprocal);\n    reciprocal = vmul_f32(vrecps_f32(val, reciprocal), reciprocal);\n    return reciprocal;\n}\n\ninline float32x4_t cv_vrsqrtq_f32(float32x4_t val)\n{\n    float32x4_t e = vrsqrteq_f32(val);\n    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(e, e), val), e);\n    e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(e, e), val), e);\n    return e;\n}\n\ninline float32x2_t cv_vrsqrt_f32(float32x2_t val)\n{\n    float32x2_t e = vrsqrte_f32(val);\n    e = vmul_f32(vrsqrts_f32(vmul_f32(e, e), val), e);\n    e = vmul_f32(vrsqrts_f32(vmul_f32(e, e), val), e);\n    return e;\n}\n\ninline float32x4_t cv_vsqrtq_f32(float32x4_t val)\n{\n    return cv_vrecpq_f32(cv_vrsqrtq_f32(val));\n}\n\ninline float32x2_t cv_vsqrt_f32(float32x2_t val)\n{\n    return cv_vrecp_f32(cv_vrsqrt_f32(val));\n}\n\n#endif\n\n//! @}\n\n#endif // __OPENCV_HAL_NEON_UTILS_HPP__\n"
  },
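  {
    "path": "docs/examples/neon_utils_example.cpp",
    "content": "// NOTE: illustrative sketch only -- this file and its path are hypothetical\n// additions for documentation, not part of upstream OpenCV or this project.\n// It demonstrates the rounding helpers declared in\n// opencv2/core/neon_utils.hpp; the CV_NEON guard means the demo only does\n// anything on ARM builds with NEON enabled.\n#include <cstdio>\n#include <cstdint>\n#include \"opencv2/core/cvdef.h\"\n\n#if CV_NEON\n#include \"opencv2/core/neon_utils.hpp\"\n\nint main()\n{\n    // cv_vrndq_s32_f32 ORs each lane's sign bit into 0.5f and adds the\n    // result before the truncating convert, i.e. it rounds half away\n    // from zero in all four lanes.\n    float src[4] = { 1.4f, 1.6f, -1.4f, -1.6f };\n    float32x4_t v = vld1q_f32(src);\n    int32x4_t r = cv_vrndq_s32_f32(v);\n\n    int32_t dst[4];\n    vst1q_s32(dst, r);\n    std::printf(\"%d %d %d %d\\n\", (int)dst[0], (int)dst[1], (int)dst[2], (int)dst[3]); // 1 2 -1 -2\n    return 0;\n}\n#else\nint main() { return 0; } // nothing to demonstrate without NEON\n#endif\n"
  },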
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/ocl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the OpenCV Foundation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OPENCL_HPP__\n#define __OPENCV_OPENCL_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv { namespace ocl {\n\n//! @addtogroup core_opencl\n//! 
@{\n\nCV_EXPORTS_W bool haveOpenCL();\nCV_EXPORTS_W bool useOpenCL();\nCV_EXPORTS_W bool haveAmdBlas();\nCV_EXPORTS_W bool haveAmdFft();\nCV_EXPORTS_W void setUseOpenCL(bool flag);\nCV_EXPORTS_W void finish();\n\nCV_EXPORTS bool haveSVM();\n\nclass CV_EXPORTS Context;\nclass CV_EXPORTS Device;\nclass CV_EXPORTS Kernel;\nclass CV_EXPORTS Program;\nclass CV_EXPORTS ProgramSource;\nclass CV_EXPORTS Queue;\nclass CV_EXPORTS PlatformInfo;\nclass CV_EXPORTS Image2D;\n\nclass CV_EXPORTS Device\n{\npublic:\n    Device();\n    explicit Device(void* d);\n    Device(const Device& d);\n    Device& operator = (const Device& d);\n    ~Device();\n\n    void set(void* d);\n\n    enum\n    {\n        TYPE_DEFAULT     = (1 << 0),\n        TYPE_CPU         = (1 << 1),\n        TYPE_GPU         = (1 << 2),\n        TYPE_ACCELERATOR = (1 << 3),\n        TYPE_DGPU        = TYPE_GPU + (1 << 16),\n        TYPE_IGPU        = TYPE_GPU + (1 << 17),\n        TYPE_ALL         = 0xFFFFFFFF\n    };\n\n    String name() const;\n    String extensions() const;\n    String version() const;\n    String vendorName() const;\n    String OpenCL_C_Version() const;\n    String OpenCLVersion() const;\n    int deviceVersionMajor() const;\n    int deviceVersionMinor() const;\n    String driverVersion() const;\n    void* ptr() const;\n\n    int type() const;\n\n    int addressBits() const;\n    bool available() const;\n    bool compilerAvailable() const;\n    bool linkerAvailable() const;\n\n    enum\n    {\n        FP_DENORM=(1 << 0),\n        FP_INF_NAN=(1 << 1),\n        FP_ROUND_TO_NEAREST=(1 << 2),\n        FP_ROUND_TO_ZERO=(1 << 3),\n        FP_ROUND_TO_INF=(1 << 4),\n        FP_FMA=(1 << 5),\n        FP_SOFT_FLOAT=(1 << 6),\n        FP_CORRECTLY_ROUNDED_DIVIDE_SQRT=(1 << 7)\n    };\n    int doubleFPConfig() const;\n    int singleFPConfig() const;\n    int halfFPConfig() const;\n\n    bool endianLittle() const;\n    bool errorCorrectionSupport() const;\n\n    enum\n    {\n        EXEC_KERNEL=(1 << 0),\n        EXEC_NATIVE_KERNEL=(1 << 1)\n    };\n    int executionCapabilities() const;\n\n    size_t globalMemCacheSize() const;\n\n    enum\n    {\n        NO_CACHE=0,\n        READ_ONLY_CACHE=1,\n        READ_WRITE_CACHE=2\n    };\n    int globalMemCacheType() const;\n    int globalMemCacheLineSize() const;\n    size_t globalMemSize() const;\n\n    size_t localMemSize() const;\n    enum\n    {\n        NO_LOCAL_MEM=0,\n        LOCAL_IS_LOCAL=1,\n        LOCAL_IS_GLOBAL=2\n    };\n    int localMemType() const;\n    bool hostUnifiedMemory() const;\n\n    bool imageSupport() const;\n\n    bool imageFromBufferSupport() const;\n    uint imagePitchAlignment() const;\n    uint imageBaseAddressAlignment() const;\n\n    size_t image2DMaxWidth() const;\n    size_t image2DMaxHeight() const;\n\n    size_t image3DMaxWidth() const;\n    size_t image3DMaxHeight() const;\n    size_t image3DMaxDepth() const;\n\n    size_t imageMaxBufferSize() const;\n    size_t imageMaxArraySize() const;\n\n    enum\n    {\n        UNKNOWN_VENDOR=0,\n        VENDOR_AMD=1,\n        VENDOR_INTEL=2,\n        VENDOR_NVIDIA=3\n    };\n    int vendorID() const;\n    // FIXIT\n    // dev.isAMD() doesn't work for OpenCL CPU devices from AMD OpenCL platform.\n    // This method should use platform name instead of vendor name.\n    // After fix restore code in arithm.cpp: ocl_compare()\n    inline bool isAMD() const { return vendorID() == VENDOR_AMD; }\n    inline bool isIntel() const { return vendorID() == VENDOR_INTEL; }\n    inline bool isNVidia() const { return 
vendorID() == VENDOR_NVIDIA; }\n\n    int maxClockFrequency() const;\n    int maxComputeUnits() const;\n    int maxConstantArgs() const;\n    size_t maxConstantBufferSize() const;\n\n    size_t maxMemAllocSize() const;\n    size_t maxParameterSize() const;\n\n    int maxReadImageArgs() const;\n    int maxWriteImageArgs() const;\n    int maxSamplers() const;\n\n    size_t maxWorkGroupSize() const;\n    int maxWorkItemDims() const;\n    void maxWorkItemSizes(size_t*) const;\n\n    int memBaseAddrAlign() const;\n\n    int nativeVectorWidthChar() const;\n    int nativeVectorWidthShort() const;\n    int nativeVectorWidthInt() const;\n    int nativeVectorWidthLong() const;\n    int nativeVectorWidthFloat() const;\n    int nativeVectorWidthDouble() const;\n    int nativeVectorWidthHalf() const;\n\n    int preferredVectorWidthChar() const;\n    int preferredVectorWidthShort() const;\n    int preferredVectorWidthInt() const;\n    int preferredVectorWidthLong() const;\n    int preferredVectorWidthFloat() const;\n    int preferredVectorWidthDouble() const;\n    int preferredVectorWidthHalf() const;\n\n    size_t printfBufferSize() const;\n    size_t profilingTimerResolution() const;\n\n    static const Device& getDefault();\n\nprotected:\n    struct Impl;\n    Impl* p;\n};\n\n\nclass CV_EXPORTS Context\n{\npublic:\n    Context();\n    explicit Context(int dtype);\n    ~Context();\n    Context(const Context& c);\n    Context& operator = (const Context& c);\n\n    bool create();\n    bool create(int dtype);\n    size_t ndevices() const;\n    const Device& device(size_t idx) const;\n    Program getProg(const ProgramSource& prog,\n                    const String& buildopt, String& errmsg);\n\n    static Context& getDefault(bool initialize = true);\n    void* ptr() const;\n\n    friend void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device);\n\n    bool useSVM() const;\n    void setUseSVM(bool enabled);\n\n    struct Impl;\n    Impl* p;\n};\n\nclass CV_EXPORTS Platform\n{\npublic:\n    Platform();\n    ~Platform();\n    Platform(const Platform& p);\n    Platform& operator = (const Platform& p);\n\n    void* ptr() const;\n    static Platform& getDefault();\n\n    friend void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device);\nprotected:\n    struct Impl;\n    Impl* p;\n};\n\n/*\n//! @brief Attaches OpenCL context to OpenCV\n//\n//! @note Note:\n//    OpenCV will check if available OpenCL platform has platformName name,\n//    then assign context to OpenCV and call clRetainContext function.\n//    The deviceID device will be used as target device and new command queue\n//    will be created.\n//\n// Params:\n//! @param platformName - name of OpenCL platform to attach,\n//!                       this string is used to check if platform is available\n//!                       to OpenCV at runtime\n//! @param platformID   - ID of platform attached context was created for\n//! @param context      - OpenCL context to be attached to OpenCV\n//! @param deviceID     - ID of device, must be created from attached context\n*/\nCV_EXPORTS void attachContext(const String& platformName, void* platformID, void* context, void* deviceID);\n\n/*\n//! @brief Convert OpenCL buffer to UMat\n//\n//! @note Note:\n//   OpenCL buffer (cl_mem_buffer) should contain 2D image data, compatible with OpenCV.\n//   Memory content is not copied from clBuffer to UMat. Instead, buffer handle assigned\n//   to UMat and clRetainMemObject is called.\n//\n// Params:\n//! 
@param  cl_mem_buffer - source clBuffer handle\n//! @param  step          - num of bytes in single row\n//! @param  rows          - number of rows\n//! @param  cols          - number of cols\n//! @param  type          - OpenCV type of image\n//! @param  dst           - destination UMat\n*/\nCV_EXPORTS void convertFromBuffer(void* cl_mem_buffer, size_t step, int rows, int cols, int type, UMat& dst);\n\n/*\n//! @brief Convert OpenCL image2d_t to UMat\n//\n//! @note Note:\n//   OpenCL image2d_t (cl_mem_image), should be compatible with OpenCV\n//   UMat formats.\n//   Memory content is copied from image to UMat with\n//   clEnqueueCopyImageToBuffer function.\n//\n// Params:\n//! @param  cl_mem_image - source image2d_t handle\n//! @param  dst          - destination UMat\n*/\nCV_EXPORTS void convertFromImage(void* cl_mem_image, UMat& dst);\n\n// TODO Move to internal header\nvoid initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device);\n\nclass CV_EXPORTS Queue\n{\npublic:\n    Queue();\n    explicit Queue(const Context& c, const Device& d=Device());\n    ~Queue();\n    Queue(const Queue& q);\n    Queue& operator = (const Queue& q);\n\n    bool create(const Context& c=Context(), const Device& d=Device());\n    void finish();\n    void* ptr() const;\n    static Queue& getDefault();\n\nprotected:\n    struct Impl;\n    Impl* p;\n};\n\n\nclass CV_EXPORTS KernelArg\n{\npublic:\n    enum { LOCAL=1, READ_ONLY=2, WRITE_ONLY=4, READ_WRITE=6, CONSTANT=8, PTR_ONLY = 16, NO_SIZE=256 };\n    KernelArg(int _flags, UMat* _m, int wscale=1, int iwscale=1, const void* _obj=0, size_t _sz=0);\n    KernelArg();\n\n    static KernelArg Local() { return KernelArg(LOCAL, 0); }\n    static KernelArg PtrWriteOnly(const UMat& m)\n    { return KernelArg(PTR_ONLY+WRITE_ONLY, (UMat*)&m); }\n    static KernelArg PtrReadOnly(const UMat& m)\n    { return KernelArg(PTR_ONLY+READ_ONLY, (UMat*)&m); }\n    static KernelArg PtrReadWrite(const UMat& m)\n    { return KernelArg(PTR_ONLY+READ_WRITE, (UMat*)&m); }\n    static KernelArg ReadWrite(const UMat& m, int wscale=1, int iwscale=1)\n    { return KernelArg(READ_WRITE, (UMat*)&m, wscale, iwscale); }\n    static KernelArg ReadWriteNoSize(const UMat& m, int wscale=1, int iwscale=1)\n    { return KernelArg(READ_WRITE+NO_SIZE, (UMat*)&m, wscale, iwscale); }\n    static KernelArg ReadOnly(const UMat& m, int wscale=1, int iwscale=1)\n    { return KernelArg(READ_ONLY, (UMat*)&m, wscale, iwscale); }\n    static KernelArg WriteOnly(const UMat& m, int wscale=1, int iwscale=1)\n    { return KernelArg(WRITE_ONLY, (UMat*)&m, wscale, iwscale); }\n    static KernelArg ReadOnlyNoSize(const UMat& m, int wscale=1, int iwscale=1)\n    { return KernelArg(READ_ONLY+NO_SIZE, (UMat*)&m, wscale, iwscale); }\n    static KernelArg WriteOnlyNoSize(const UMat& m, int wscale=1, int iwscale=1)\n    { return KernelArg(WRITE_ONLY+NO_SIZE, (UMat*)&m, wscale, iwscale); }\n    static KernelArg Constant(const Mat& m);\n    template<typename _Tp> static KernelArg Constant(const _Tp* arr, size_t n)\n    { return KernelArg(CONSTANT, 0, 1, 1, (void*)arr, n); }\n\n    int flags;\n    UMat* m;\n    const void* obj;\n    size_t sz;\n    int wscale, iwscale;\n};\n\n\nclass CV_EXPORTS Kernel\n{\npublic:\n    Kernel();\n    Kernel(const char* kname, const Program& prog);\n    Kernel(const char* kname, const ProgramSource& prog,\n           const String& buildopts = String(), String* errmsg=0);\n    ~Kernel();\n    Kernel(const Kernel& k);\n    Kernel& operator = (const Kernel& k);\n\n    bool 
empty() const;\n    bool create(const char* kname, const Program& prog);\n    bool create(const char* kname, const ProgramSource& prog,\n                const String& buildopts, String* errmsg=0);\n\n    int set(int i, const void* value, size_t sz);\n    int set(int i, const Image2D& image2D);\n    int set(int i, const UMat& m);\n    int set(int i, const KernelArg& arg);\n    template<typename _Tp> int set(int i, const _Tp& value)\n    { return set(i, &value, sizeof(value)); }\n\n    template<typename _Tp0>\n    Kernel& args(const _Tp0& a0)\n    {\n        set(0, a0); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1)\n    {\n        int i = set(0, a0); set(i, a1); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2)\n    {\n        int i = set(0, a0); i = set(i, a1); set(i, a2); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3, typename _Tp4>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2,\n                 const _Tp3& a3, const _Tp4& a4)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2);\n        i = set(i, a3); set(i, a4); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2,\n             typename _Tp3, typename _Tp4, typename _Tp5>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2,\n                 const _Tp3& a3, const _Tp4& a4, const _Tp5& a5)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2);\n        i = set(i, a3); i = set(i, a4); set(i, a5); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3);\n        i = set(i, a4); i = set(i, a5); set(i, a6); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3);\n        i = set(i, a4); i = set(i, a5); i = set(i, a6); set(i, a7); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3, typename _Tp4,\n             typename _Tp5, typename _Tp6, typename _Tp7, typename _Tp8>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4);\n        i = set(i, a5); i = set(i, a6); i = set(i, a7); set(i, a8); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3, typename _Tp4,\n             
typename _Tp5, typename _Tp6, typename _Tp7, typename _Tp8, typename _Tp9>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8, const _Tp9& a9)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);\n        i = set(i, a6); i = set(i, a7); i = set(i, a8); set(i, a9); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,\n             typename _Tp8, typename _Tp9, typename _Tp10>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8, const _Tp9& a9, const _Tp10& a10)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);\n        i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); set(i, a10); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,\n             typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);\n        i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); set(i, a11); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,\n             typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11,\n                 const _Tp12& a12)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);\n        i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11);\n        set(i, a12); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,\n             typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12,\n             typename _Tp13>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11,\n                 const _Tp12& a12, const _Tp13& a13)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);\n        i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11);\n        i = set(i, a12); set(i, a13); return *this;\n    }\n\n    template<typename 
_Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,\n             typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12,\n             typename _Tp13, typename _Tp14>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11,\n                 const _Tp12& a12, const _Tp13& a13, const _Tp14& a14)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);\n        i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11);\n        i = set(i, a12); i = set(i, a13); set(i, a14); return *this;\n    }\n\n    template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,\n             typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,\n             typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12,\n             typename _Tp13, typename _Tp14, typename _Tp15>\n    Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3,\n                 const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7,\n                 const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11,\n                 const _Tp12& a12, const _Tp13& a13, const _Tp14& a14, const _Tp15& a15)\n    {\n        int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);\n        i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11);\n        i = set(i, a12); i = set(i, a13); i = set(i, a14); set(i, a15); return *this;\n    }\n\n    bool run(int dims, size_t globalsize[],\n             size_t localsize[], bool sync, const Queue& q=Queue());\n    bool runTask(bool sync, const Queue& q=Queue());\n\n    size_t workGroupSize() const;\n    size_t preferedWorkGroupSizeMultiple() const;\n    bool compileWorkGroupSize(size_t wsz[]) const;\n    size_t localMemSize() const;\n\n    void* ptr() const;\n    struct Impl;\n\nprotected:\n    Impl* p;\n};\n\nclass CV_EXPORTS Program\n{\npublic:\n    Program();\n    Program(const ProgramSource& src,\n            const String& buildflags, String& errmsg);\n    explicit Program(const String& buf);\n    Program(const Program& prog);\n\n    Program& operator = (const Program& prog);\n    ~Program();\n\n    bool create(const ProgramSource& src,\n                const String& buildflags, String& errmsg);\n    bool read(const String& buf, const String& buildflags);\n    bool write(String& buf) const;\n\n    const ProgramSource& source() const;\n    void* ptr() const;\n\n    String getPrefix() const;\n    static String getPrefix(const String& buildflags);\n\nprotected:\n    struct Impl;\n    Impl* p;\n};\n\n\nclass CV_EXPORTS ProgramSource\n{\npublic:\n    typedef uint64 hash_t;\n\n    ProgramSource();\n    explicit ProgramSource(const String& prog);\n    explicit ProgramSource(const char* prog);\n    ~ProgramSource();\n    ProgramSource(const ProgramSource& prog);\n    ProgramSource& operator = (const ProgramSource& prog);\n\n    const String& source() const;\n    hash_t hash() const;\n\nprotected:\n    struct Impl;\n    Impl* p;\n};\n\nclass CV_EXPORTS PlatformInfo\n{\npublic:\n    PlatformInfo();\n    explicit PlatformInfo(void* id);\n    ~PlatformInfo();\n\n    
PlatformInfo(const PlatformInfo& i);\n    PlatformInfo& operator =(const PlatformInfo& i);\n\n    String name() const;\n    String vendor() const;\n    String version() const;\n    int deviceNumber() const;\n    void getDevice(Device& device, int d) const;\n\nprotected:\n    struct Impl;\n    Impl* p;\n};\n\nCV_EXPORTS const char* convertTypeStr(int sdepth, int ddepth, int cn, char* buf);\nCV_EXPORTS const char* typeToStr(int t);\nCV_EXPORTS const char* memopTypeToStr(int t);\nCV_EXPORTS const char* vecopTypeToStr(int t);\nCV_EXPORTS String kernelToStr(InputArray _kernel, int ddepth = -1, const char * name = NULL);\nCV_EXPORTS void getPlatfomsInfo(std::vector<PlatformInfo>& platform_info);\n\n\nenum OclVectorStrategy\n{\n    // each matrix has its own vector width\n    OCL_VECTOR_OWN = 0,\n    // all matrices use the maximal vector width among them\n    // (useful for cases when matrices have different data types)\n    OCL_VECTOR_MAX = 1,\n\n    // default strategy\n    OCL_VECTOR_DEFAULT = OCL_VECTOR_OWN\n};\n\nCV_EXPORTS int predictOptimalVectorWidth(InputArray src1, InputArray src2 = noArray(), InputArray src3 = noArray(),\n                                         InputArray src4 = noArray(), InputArray src5 = noArray(), InputArray src6 = noArray(),\n                                         InputArray src7 = noArray(), InputArray src8 = noArray(), InputArray src9 = noArray(),\n                                         OclVectorStrategy strat = OCL_VECTOR_DEFAULT);\n\nCV_EXPORTS int checkOptimalVectorWidth(const int *vectorWidths,\n                                       InputArray src1, InputArray src2 = noArray(), InputArray src3 = noArray(),\n                                       InputArray src4 = noArray(), InputArray src5 = noArray(), InputArray src6 = noArray(),\n                                       InputArray src7 = noArray(), InputArray src8 = noArray(), InputArray src9 = noArray(),\n                                       OclVectorStrategy strat = OCL_VECTOR_DEFAULT);\n\n// with OCL_VECTOR_MAX strategy\nCV_EXPORTS int predictOptimalVectorWidthMax(InputArray src1, InputArray src2 = noArray(), InputArray src3 = noArray(),\n                                            InputArray src4 = noArray(), InputArray src5 = noArray(), InputArray src6 = noArray(),\n                                            InputArray src7 = noArray(), InputArray src8 = noArray(), InputArray src9 = noArray());\n\nCV_EXPORTS void buildOptionsAddMatrixDescription(String& buildOptions, const String& name, InputArray _m);\n\nclass CV_EXPORTS Image2D\n{\npublic:\n    Image2D();\n\n    // src:     The UMat from which to get image properties and data\n    // norm:    Flag to enable the use of normalized channel data types\n    // alias:   Flag indicating that the image should alias the src UMat.\n    //          If true, changes to the image or src will be reflected in\n    //          both objects.\n    explicit Image2D(const UMat &src, bool norm = false, bool alias = false);\n    Image2D(const Image2D & i);\n    ~Image2D();\n\n    Image2D & operator = (const Image2D & i);\n\n    // Indicates if creating an aliased image should succeed.  
Depends on the\n    // underlying platform and the dimensions of the UMat.\n    static bool canCreateAlias(const UMat &u);\n\n    // Indicates if the image format is supported.\n    static bool isFormatSupported(int depth, int cn, bool norm);\n\n    void* ptr() const;\nprotected:\n    struct Impl;\n    Impl* p;\n};\n\n\nCV_EXPORTS MatAllocator* getOpenCLAllocator();\n\n\n#ifdef __OPENCV_BUILD\nnamespace internal {\n\nCV_EXPORTS bool isPerformanceCheckBypassed();\n#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::internal::isPerformanceCheckBypassed() || (condition))\n\nCV_EXPORTS bool isCLBuffer(UMat& u);\n\n} // namespace internal\n#endif\n\n//! @}\n\n}}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/ocl_genbase.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the OpenCV Foundation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OPENCL_GENBASE_HPP__\n#define __OPENCV_OPENCL_GENBASE_HPP__\n\nnamespace cv\n{\nnamespace ocl\n{\n\n//! @cond IGNORED\n\nstruct ProgramEntry\n{\n    const char* name;\n    const char* programStr;\n    const char* programHash;\n};\n\n//! @endcond\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/opengl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_OPENGL_HPP__\n#define __OPENCV_CORE_OPENGL_HPP__\n\n#ifndef __cplusplus\n#  error opengl.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core.hpp\"\n#include \"ocl.hpp\"\n\nnamespace cv { namespace ogl {\n\n/** @addtogroup core_opengl\nThis section describes OpenGL interoperability.\n\nTo enable OpenGL support, configure OpenCV using CMake with WITH_OPENGL=ON . Currently OpenGL is\nsupported only with WIN32, GTK and Qt backends on Windows and Linux (MacOS and Android are not\nsupported). For GTK backend gtkglext-1.0 library is required.\n\nTo use OpenGL functionality you should first create OpenGL context (window or frame buffer). You can\ndo this with namedWindow function or with other OpenGL toolkit (GLUT, for example).\n*/\n//! @{\n\n/////////////////// OpenGL Objects ///////////////////\n\n/** @brief Smart pointer for OpenGL buffer object with reference counting.\n\nBuffer Objects are OpenGL objects that store an array of unformatted memory allocated by the OpenGL\ncontext. 
These can be used to store vertex data, pixel data retrieved from images or the\nframebuffer, and a variety of other things.\n\nogl::Buffer has an interface similar to the Mat interface and represents 2D array memory.\n\nogl::Buffer supports memory transfers between host and device and also can be mapped to CUDA memory.\n */\nclass CV_EXPORTS Buffer\n{\npublic:\n    /** @brief The target defines how you intend to use the buffer object.\n    */\n    enum Target\n    {\n        ARRAY_BUFFER         = 0x8892, //!< The buffer will be used as a source for vertex data\n        ELEMENT_ARRAY_BUFFER = 0x8893, //!< The buffer will be used for indices (in glDrawElements, for example)\n        PIXEL_PACK_BUFFER    = 0x88EB, //!< The buffer will be used for reading from OpenGL textures\n        PIXEL_UNPACK_BUFFER  = 0x88EC  //!< The buffer will be used for writing to OpenGL textures\n    };\n\n    enum Access\n    {\n        READ_ONLY  = 0x88B8,\n        WRITE_ONLY = 0x88B9,\n        READ_WRITE = 0x88BA\n    };\n\n    /** @brief The constructors.\n\n    Creates an empty ogl::Buffer object, creates an ogl::Buffer object from an existing buffer ( abufId\n    parameter), allocates memory for the ogl::Buffer object, or copies from host/device memory.\n     */\n    Buffer();\n\n    /** @overload\n    @param arows Number of rows in a 2D array.\n    @param acols Number of columns in a 2D array.\n    @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details.\n    @param abufId Buffer object name.\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false);\n\n    /** @overload\n    @param asize 2D array size.\n    @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details.\n    @param abufId Buffer object name.\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false);\n\n    /** @overload\n    @param arows Number of rows in a 2D array.\n    @param acols Number of columns in a 2D array.\n    @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details.\n    @param target Buffer usage. See cv::ogl::Buffer::Target .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);\n\n    /** @overload\n    @param asize 2D array size.\n    @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details.\n    @param target Buffer usage. See cv::ogl::Buffer::Target .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);\n\n    /** @overload\n    @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or std::vector ).\n    @param target Buffer usage. See cv::ogl::Buffer::Target .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    explicit Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);\n\n    /** @brief Allocates memory for ogl::Buffer object.\n\n    @param arows Number of rows in a 2D array.\n    @param acols Number of columns in a 2D array.\n    @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). 
See Mat for details.\n    @param target Buffer usage. See cv::ogl::Buffer::Target .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n     */\n    void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);\n\n    /** @overload\n    @param asize 2D array size.\n    @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details.\n    @param target Buffer usage. See cv::ogl::Buffer::Target .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    void create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);\n\n    /** @brief Decrements the reference counter and destroys the buffer object if needed.\n\n    The function will call setAutoRelease(true) .\n     */\n    void release();\n\n    /** @brief Sets auto release mode.\n\n    The lifetime of the OpenGL object is tied to the lifetime of the context. If OpenGL context was\n    bound to a window it could be released at any time (user can close a window). If object's destructor\n    is called after destruction of the context it will cause an error. Thus ogl::Buffer doesn't destroy\n    OpenGL object in destructor by default (all OpenGL resources will be released with OpenGL context).\n    This function can force ogl::Buffer destructor to destroy OpenGL object.\n    @param flag Auto release mode (if true, release will be called in object's destructor).\n     */\n    void setAutoRelease(bool flag);\n\n    /** @brief Copies from host/device memory to OpenGL buffer.\n    @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or std::vector ).\n    @param target Buffer usage. See cv::ogl::Buffer::Target .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n     */\n    void copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);\n\n    /** @overload */\n    void copyFrom(InputArray arr, cuda::Stream& stream, Target target = ARRAY_BUFFER, bool autoRelease = false);\n\n    /** @brief Copies from OpenGL buffer to host/device memory or another OpenGL buffer object.\n\n    @param arr Destination array (host or device memory, can be Mat , cuda::GpuMat , std::vector or\n    ogl::Buffer ).\n     */\n    void copyTo(OutputArray arr) const;\n\n    /** @overload */\n    void copyTo(OutputArray arr, cuda::Stream& stream) const;\n\n    /** @brief Creates a full copy of the buffer object and the underlying data.\n\n    @param target Buffer usage for destination buffer.\n    @param autoRelease Auto release mode for destination buffer.\n     */\n    Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const;\n\n    /** @brief Binds OpenGL buffer to the specified buffer binding point.\n\n    @param target Binding point. See cv::ogl::Buffer::Target .\n     */\n    void bind(Target target) const;\n\n    /** @brief Unbind any buffers from the specified binding point.\n\n    @param target Binding point. See cv::ogl::Buffer::Target .\n     */\n    static void unbind(Target target);\n\n    /** @brief Maps OpenGL buffer to host memory.\n\n    mapHost maps to the client's address space the entire data store of the buffer object. 
The data can\n    then be directly read and/or written relative to the returned pointer, depending on the specified\n    access policy.\n\n    A mapped data store must be unmapped with ogl::Buffer::unmapHost before its buffer object is used.\n\n    This operation can lead to memory transfers between host and device.\n\n    Only one buffer object can be mapped at a time.\n    @param access Access policy, indicating whether it will be possible to read from, write to, or both\n    read from and write to the buffer object's mapped data store. The symbolic constant must be\n    ogl::Buffer::READ_ONLY , ogl::Buffer::WRITE_ONLY or ogl::Buffer::READ_WRITE .\n     */\n    Mat mapHost(Access access);\n\n    /** @brief Unmaps OpenGL buffer.\n    */\n    void unmapHost();\n\n    //! map to device memory (blocking)\n    cuda::GpuMat mapDevice();\n    void unmapDevice();\n\n    /** @brief Maps OpenGL buffer to CUDA device memory.\n\n    This operation doesn't copy data. Several buffer objects can be mapped to CUDA memory at a time.\n\n    A mapped data store must be unmapped with ogl::Buffer::unmapDevice before its buffer object is used.\n     */\n    cuda::GpuMat mapDevice(cuda::Stream& stream);\n\n    /** @brief Unmaps OpenGL buffer.\n    */\n    void unmapDevice(cuda::Stream& stream);\n\n    int rows() const;\n    int cols() const;\n    Size size() const;\n    bool empty() const;\n\n    int type() const;\n    int depth() const;\n    int channels() const;\n    int elemSize() const;\n    int elemSize1() const;\n\n    //! get OpenGL object id\n    unsigned int bufId() const;\n\n    class Impl;\n\nprivate:\n    Ptr<Impl> impl_;\n    int rows_;\n    int cols_;\n    int type_;\n};\n\n/** @brief Smart pointer for OpenGL 2D texture memory with reference counting.\n */\nclass CV_EXPORTS Texture2D\n{\npublic:\n    /** @brief An Image Format describes the way that the images in Textures store their data.\n    */\n    enum Format\n    {\n        NONE            = 0,\n        DEPTH_COMPONENT = 0x1902, //!< Depth\n        RGB             = 0x1907, //!< Red, Green, Blue\n        RGBA            = 0x1908  //!< Red, Green, Blue, Alpha\n    };\n\n    /** @brief The constructors.\n\n    Creates an empty ogl::Texture2D object, allocates memory for the ogl::Texture2D object or copies from\n    host/device memory.\n     */\n    Texture2D();\n\n    /** @overload */\n    Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false);\n\n    /** @overload */\n    Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false);\n\n    /** @overload\n    @param arows Number of rows.\n    @param acols Number of columns.\n    @param aformat Image format. See cv::ogl::Texture2D::Format .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    Texture2D(int arows, int acols, Format aformat, bool autoRelease = false);\n\n    /** @overload\n    @param asize 2D array size.\n    @param aformat Image format. 
See cv::ogl::Texture2D::Format .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    Texture2D(Size asize, Format aformat, bool autoRelease = false);\n\n    /** @overload\n    @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or ogl::Buffer ).\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    explicit Texture2D(InputArray arr, bool autoRelease = false);\n\n    /** @brief Allocates memory for ogl::Texture2D object.\n\n    @param arows Number of rows.\n    @param acols Number of columns.\n    @param aformat Image format. See cv::ogl::Texture2D::Format .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n     */\n    void create(int arows, int acols, Format aformat, bool autoRelease = false);\n    /** @overload\n    @param asize 2D array size.\n    @param aformat Image format. See cv::ogl::Texture2D::Format .\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n    */\n    void create(Size asize, Format aformat, bool autoRelease = false);\n\n    /** @brief Decrements the reference counter and destroys the texture object if needed.\n\n    The function will call setAutoRelease(true) .\n     */\n    void release();\n\n    /** @brief Sets auto release mode.\n\n    @param flag Auto release mode (if true, release will be called in object's destructor).\n\n    The lifetime of the OpenGL object is tied to the lifetime of the context. If OpenGL context was\n    bound to a window it could be released at any time (user can close a window). If object's destructor\n    is called after destruction of the context it will cause an error. Thus ogl::Texture2D doesn't\n    destroy OpenGL object in destructor by default (all OpenGL resources will be released with OpenGL\n    context). This function can force ogl::Texture2D destructor to destroy OpenGL object.\n     */\n    void setAutoRelease(bool flag);\n\n    /** @brief Copies from host/device memory to OpenGL texture.\n\n    @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or ogl::Buffer ).\n    @param autoRelease Auto release mode (if true, release will be called in object's destructor).\n     */\n    void copyFrom(InputArray arr, bool autoRelease = false);\n\n    /** @brief Copies from OpenGL texture to host/device memory or another OpenGL texture object.\n\n    @param arr Destination array (host or device memory, can be Mat , cuda::GpuMat , ogl::Buffer or\n    ogl::Texture2D ).\n    @param ddepth Destination depth.\n    @param autoRelease Auto release mode for destination buffer (if arr is OpenGL buffer or texture).\n     */\n    void copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const;\n\n    /** @brief Binds texture to current active texture unit for GL_TEXTURE_2D target.\n    */\n    void bind() const;\n\n    int rows() const;\n    int cols() const;\n    Size size() const;\n    bool empty() const;\n\n    Format format() const;\n\n    //! 
get OpenGL object id\n    unsigned int texId() const;\n\n    class Impl;\n\nprivate:\n    Ptr<Impl> impl_;\n    int rows_;\n    int cols_;\n    Format format_;\n};\n\n/** @brief Wrapper for OpenGL Client-Side Vertex arrays.\n\nogl::Arrays stores vertex data in ogl::Buffer objects.\n */\nclass CV_EXPORTS Arrays\n{\npublic:\n    /** @brief Default constructor\n     */\n    Arrays();\n\n    /** @brief Sets an array of vertex coordinates.\n    @param vertex array with vertex coordinates, can be both host and device memory.\n    */\n    void setVertexArray(InputArray vertex);\n\n    /** @brief Resets vertex coordinates.\n    */\n    void resetVertexArray();\n\n    /** @brief Sets an array of vertex colors.\n    @param color array with vertex colors, can be both host and device memory.\n     */\n    void setColorArray(InputArray color);\n\n    /** @brief Resets vertex colors.\n    */\n    void resetColorArray();\n\n    /** @brief Sets an array of vertex normals.\n    @param normal array with vertex normals, can be both host and device memory.\n     */\n    void setNormalArray(InputArray normal);\n\n    /** @brief Resets vertex normals.\n    */\n    void resetNormalArray();\n\n    /** @brief Sets an array of vertex texture coordinates.\n    @param texCoord array with vertex texture coordinates, can be both host and device memory.\n     */\n    void setTexCoordArray(InputArray texCoord);\n\n    /** @brief Resets vertex texture coordinates.\n    */\n    void resetTexCoordArray();\n\n    /** @brief Releases all inner buffers.\n    */\n    void release();\n\n    /** @brief Sets auto release mode for all inner buffers.\n    @param flag Auto release mode.\n     */\n    void setAutoRelease(bool flag);\n\n    /** @brief Binds all vertex arrays.\n    */\n    void bind() const;\n\n    /** @brief Returns the vertex count.\n    */\n    int size() const;\n    bool empty() const;\n\nprivate:\n    int size_;\n    Buffer vertex_;\n    Buffer color_;\n    Buffer normal_;\n    Buffer texCoord_;\n};\n\n/////////////////// Render Functions ///////////////////\n\n//! render mode\nenum RenderModes {\n    POINTS         = 0x0000,\n    LINES          = 0x0001,\n    LINE_LOOP      = 0x0002,\n    LINE_STRIP     = 0x0003,\n    TRIANGLES      = 0x0004,\n    TRIANGLE_STRIP = 0x0005,\n    TRIANGLE_FAN   = 0x0006,\n    QUADS          = 0x0007,\n    QUAD_STRIP     = 0x0008,\n    POLYGON        = 0x0009\n};\n\n/** @brief Render OpenGL texture or primitives.\n@param tex Texture to draw.\n@param wndRect Region of window where to draw a texture (normalized coordinates).\n@param texRect Region of texture to draw (normalized coordinates).\n */\nCV_EXPORTS void render(const Texture2D& tex,\n    Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0),\n    Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0));\n\n/** @overload\n@param arr Array of primitive vertices.\n@param mode Render mode. One of cv::ogl::RenderModes\n@param color Color for all vertices. Will be used if arr doesn't contain color array.\n*/\nCV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255));\n\n/** @overload\n@param arr Array of primitive vertices.\n@param indices Array of vertex indices (host or device memory).\n@param mode Render mode. One of cv::ogl::RenderModes\n@param color Color for all vertices. 
Will be used if arr doesn't contain color array.\n*/\nCV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255));\n\n/////////////////// CL-GL Interoperability Functions ///////////////////\n\nnamespace ocl {\nusing namespace cv::ocl;\n\n// TODO static functions in the Context class\n/** @brief Creates OpenCL context from GL.\n@return Returns reference to OpenCL Context\n */\nCV_EXPORTS Context& initializeContextFromGL();\n\n} // namespace cv::ogl::ocl\n\n/** @brief Converts InputArray to Texture2D object.\n@param src     - source InputArray.\n@param texture - destination Texture2D object.\n */\nCV_EXPORTS void convertToGLTexture2D(InputArray src, Texture2D& texture);\n\n/** @brief Converts Texture2D object to OutputArray.\n@param texture - source Texture2D object.\n@param dst     - destination OutputArray.\n */\nCV_EXPORTS void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst);\n\n/** @brief Maps Buffer object to process on CL side (convert to UMat).\n\nThe function creates a CL buffer from the GL one, and then constructs a UMat that can be used\nto process buffer data with OpenCV functions. Note that in the current implementation a\nUMat constructed this way doesn't own the corresponding GL buffer object, so it is\nthe user's responsibility to close down the CL/GL buffer relationship by explicitly\ncalling the unmapGLBuffer() function.\n@param buffer      - source Buffer object.\n@param accessFlags - data access flags (ACCESS_READ|ACCESS_WRITE).\n@return Returns UMat object\n */\nCV_EXPORTS UMat mapGLBuffer(const Buffer& buffer, int accessFlags = ACCESS_READ|ACCESS_WRITE);\n\n/** @brief Unmaps Buffer object (releases UMat, previously mapped from Buffer).\n\nThis function must be called explicitly by the user for each UMat previously constructed\nby a call to the mapGLBuffer() function.\n@param u           - source UMat, created by mapGLBuffer().\n */\nCV_EXPORTS void unmapGLBuffer(UMat& u);\n\n}} // namespace cv::ogl\n\nnamespace cv { namespace cuda {\n\n//! @addtogroup cuda\n//! @{\n\n/** @brief Sets a CUDA device and initializes it for the current thread with OpenGL interoperability.\n\nThis function should be explicitly called after OpenGL context creation and before any CUDA calls.\n@param device System index of a CUDA device starting with 0.\n@ingroup core_opengl\n */\nCV_EXPORTS void setGlDevice(int device = 0);\n\n//! @}\n\n}}\n\n//! 
@cond IGNORED\n\n////////////////////////////////////////////////////////////////////////\n////////////////////////////////////////////////////////////////////////\n////////////////////////////////////////////////////////////////////////\n\ninline\ncv::ogl::Buffer::Buffer(int arows, int acols, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)\n{\n    create(arows, acols, atype, target, autoRelease);\n}\n\ninline\ncv::ogl::Buffer::Buffer(Size asize, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)\n{\n    create(asize, atype, target, autoRelease);\n}\n\ninline\nvoid cv::ogl::Buffer::create(Size asize, int atype, Target target, bool autoRelease)\n{\n    create(asize.height, asize.width, atype, target, autoRelease);\n}\n\ninline\nint cv::ogl::Buffer::rows() const\n{\n    return rows_;\n}\n\ninline\nint cv::ogl::Buffer::cols() const\n{\n    return cols_;\n}\n\ninline\ncv::Size cv::ogl::Buffer::size() const\n{\n    return Size(cols_, rows_);\n}\n\ninline\nbool cv::ogl::Buffer::empty() const\n{\n    return rows_ == 0 || cols_ == 0;\n}\n\ninline\nint cv::ogl::Buffer::type() const\n{\n    return type_;\n}\n\ninline\nint cv::ogl::Buffer::depth() const\n{\n    return CV_MAT_DEPTH(type_);\n}\n\ninline\nint cv::ogl::Buffer::channels() const\n{\n    return CV_MAT_CN(type_);\n}\n\ninline\nint cv::ogl::Buffer::elemSize() const\n{\n    return CV_ELEM_SIZE(type_);\n}\n\ninline\nint cv::ogl::Buffer::elemSize1() const\n{\n    return CV_ELEM_SIZE1(type_);\n}\n\n///////\n\ninline\ncv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)\n{\n    create(arows, acols, aformat, autoRelease);\n}\n\ninline\ncv::ogl::Texture2D::Texture2D(Size asize, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)\n{\n    create(asize, aformat, autoRelease);\n}\n\ninline\nvoid cv::ogl::Texture2D::create(Size asize, Format aformat, bool autoRelease)\n{\n    create(asize.height, asize.width, aformat, autoRelease);\n}\n\ninline\nint cv::ogl::Texture2D::rows() const\n{\n    return rows_;\n}\n\ninline\nint cv::ogl::Texture2D::cols() const\n{\n    return cols_;\n}\n\ninline\ncv::Size cv::ogl::Texture2D::size() const\n{\n    return Size(cols_, rows_);\n}\n\ninline\nbool cv::ogl::Texture2D::empty() const\n{\n    return rows_ == 0 || cols_ == 0;\n}\n\ninline\ncv::ogl::Texture2D::Format cv::ogl::Texture2D::format() const\n{\n    return format_;\n}\n\n///////\n\ninline\ncv::ogl::Arrays::Arrays() : size_(0)\n{\n}\n\ninline\nint cv::ogl::Arrays::size() const\n{\n    return size_;\n}\n\ninline\nbool cv::ogl::Arrays::empty() const\n{\n    return size_ == 0;\n}\n\n//! @endcond\n\n#endif /* __OPENCV_CORE_OPENGL_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/operations.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_OPERATIONS_HPP__\n#define __OPENCV_CORE_OPERATIONS_HPP__\n\n#ifndef __cplusplus\n#  error operations.hpp header must be compiled as C++\n#endif\n\n#include <cstdio>\n\n//! 
@cond IGNORED\n\nnamespace cv\n{\n\n////////////////////////////// Matx methods depending on core API /////////////////////////////\n\nnamespace internal\n{\n\ntemplate<typename _Tp, int m> struct Matx_FastInvOp\n{\n    bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const\n    {\n        Matx<_Tp, m, m> temp = a;\n\n        // assume that b is all 0's on input => make it a unity matrix\n        for( int i = 0; i < m; i++ )\n            b(i, i) = (_Tp)1;\n\n        if( method == DECOMP_CHOLESKY )\n            return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m);\n\n        return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0;\n    }\n};\n\ntemplate<typename _Tp> struct Matx_FastInvOp<_Tp, 2>\n{\n    bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const\n    {\n        _Tp d = determinant(a);\n        if( d == 0 )\n            return false;\n        d = 1/d;\n        b(1,1) = a(0,0)*d;\n        b(0,0) = a(1,1)*d;\n        b(0,1) = -a(0,1)*d;\n        b(1,0) = -a(1,0)*d;\n        return true;\n    }\n};\n\ntemplate<typename _Tp> struct Matx_FastInvOp<_Tp, 3>\n{\n    bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const\n    {\n        _Tp d = (_Tp)determinant(a);\n        if( d == 0 )\n            return false;\n        d = 1/d;\n        b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d;\n        b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d;\n        b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d;\n\n        b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d;\n        b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d;\n        b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d;\n\n        b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d;\n        b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d;\n        b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d;\n        return true;\n    }\n};\n\n\ntemplate<typename _Tp, int m, int n> struct Matx_FastSolveOp\n{\n    bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b,\n                    Matx<_Tp, m, n>& x, int method) const\n    {\n        Matx<_Tp, m, m> temp = a;\n        x = b;\n        if( method == DECOMP_CHOLESKY )\n            return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n);\n\n        return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0;\n    }\n};\n\ntemplate<typename _Tp> struct Matx_FastSolveOp<_Tp, 2, 1>\n{\n    bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b,\n                    Matx<_Tp, 2, 1>& x, int) const\n    {\n        _Tp d = determinant(a);\n        if( d == 0 )\n            return false;\n        d = 1/d;\n        x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d;\n        x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d;\n        return true;\n    }\n};\n\ntemplate<typename _Tp> struct Matx_FastSolveOp<_Tp, 3, 1>\n{\n    bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b,\n                    Matx<_Tp, 3, 1>& x, int) const\n    {\n        _Tp d = (_Tp)determinant(a);\n        if( d == 0 )\n            return false;\n        d = 1/d;\n        x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) -\n                a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) +\n                a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2)));\n\n        x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) -\n                b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) +\n                a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0)));\n\n        x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) -\n                a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) 
+\n                b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0)));\n        return true;\n    }\n};\n\n} // internal\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b)\n{\n    Matx<_Tp,m,n> M;\n    cv::randu(M, Scalar(a), Scalar(b));\n    return M;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b)\n{\n    Matx<_Tp,m,n> M;\n    cv::randn(M, Scalar(a), Scalar(b));\n    return M;\n}\n\ntemplate<typename _Tp, int m, int n> inline\nMatx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method, bool *p_is_ok /*= NULL*/) const\n{\n    Matx<_Tp, n, m> b;\n    bool ok;\n    if( method == DECOMP_LU || method == DECOMP_CHOLESKY )\n        ok = cv::internal::Matx_FastInvOp<_Tp, m>()(*this, b, method);\n    else\n    {\n        Mat A(*this, false), B(b, false);\n        ok = (invert(A, B, method) != 0);\n    }\n    if( NULL != p_is_ok ) { *p_is_ok = ok; }\n    return ok ? b : Matx<_Tp, n, m>::zeros();\n}\n\ntemplate<typename _Tp, int m, int n> template<int l> inline\nMatx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const\n{\n    Matx<_Tp, n, l> x;\n    bool ok;\n    if( method == DECOMP_LU || method == DECOMP_CHOLESKY )\n        ok = cv::internal::Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method);\n    else\n    {\n        Mat A(*this, false), B(rhs, false), X(x, false);\n        ok = cv::solve(A, B, X, method);\n    }\n\n    return ok ? x : Matx<_Tp, n, l>::zeros();\n}\n\n\n\n////////////////////////// Augmenting algebraic & logical operations //////////////////////////\n\n#define CV_MAT_AUG_OPERATOR1(op, cvop, A, B) \\\n    static inline A& operator op (A& a, const B& b) { cvop; return a; }\n\n#define CV_MAT_AUG_OPERATOR(op, cvop, A, B)   \\\n    CV_MAT_AUG_OPERATOR1(op, cvop, A, B)      \\\n    CV_MAT_AUG_OPERATOR1(op, cvop, const A, B)\n\n#define CV_MAT_AUG_OPERATOR_T(op, cvop, A, B)                   \\\n    template<typename _Tp> CV_MAT_AUG_OPERATOR1(op, cvop, A, B) \\\n    template<typename _Tp> CV_MAT_AUG_OPERATOR1(op, cvop, const A, B)\n\nCV_MAT_AUG_OPERATOR  (+=, cv::add(a,b,a), Mat, Mat)\nCV_MAT_AUG_OPERATOR  (+=, cv::add(a,b,a), Mat, Scalar)\nCV_MAT_AUG_OPERATOR_T(+=, cv::add(a,b,a), Mat_<_Tp>, Mat)\nCV_MAT_AUG_OPERATOR_T(+=, cv::add(a,b,a), Mat_<_Tp>, Scalar)\nCV_MAT_AUG_OPERATOR_T(+=, cv::add(a,b,a), Mat_<_Tp>, Mat_<_Tp>)\n\nCV_MAT_AUG_OPERATOR  (-=, cv::subtract(a,b,a), Mat, Mat)\nCV_MAT_AUG_OPERATOR  (-=, cv::subtract(a,b,a), Mat, Scalar)\nCV_MAT_AUG_OPERATOR_T(-=, cv::subtract(a,b,a), Mat_<_Tp>, Mat)\nCV_MAT_AUG_OPERATOR_T(-=, cv::subtract(a,b,a), Mat_<_Tp>, Scalar)\nCV_MAT_AUG_OPERATOR_T(-=, cv::subtract(a,b,a), Mat_<_Tp>, Mat_<_Tp>)\n\nCV_MAT_AUG_OPERATOR  (*=, cv::gemm(a, b, 1, Mat(), 0, a, 0), Mat, Mat)\nCV_MAT_AUG_OPERATOR_T(*=, cv::gemm(a, b, 1, Mat(), 0, a, 0), Mat_<_Tp>, Mat)\nCV_MAT_AUG_OPERATOR_T(*=, cv::gemm(a, b, 1, Mat(), 0, a, 0), Mat_<_Tp>, Mat_<_Tp>)\nCV_MAT_AUG_OPERATOR  (*=, a.convertTo(a, -1, b), Mat, double)\nCV_MAT_AUG_OPERATOR_T(*=, a.convertTo(a, -1, b), Mat_<_Tp>, double)\n\nCV_MAT_AUG_OPERATOR  (/=, cv::divide(a,b,a), Mat, Mat)\nCV_MAT_AUG_OPERATOR_T(/=, cv::divide(a,b,a), Mat_<_Tp>, Mat)\nCV_MAT_AUG_OPERATOR_T(/=, cv::divide(a,b,a), Mat_<_Tp>, Mat_<_Tp>)\nCV_MAT_AUG_OPERATOR  (/=, a.convertTo((Mat&)a, -1, 1./b), Mat, double)\nCV_MAT_AUG_OPERATOR_T(/=, a.convertTo((Mat&)a, -1, 1./b), Mat_<_Tp>, double)\n\nCV_MAT_AUG_OPERATOR  (&=, cv::bitwise_and(a,b,a), Mat, Mat)\nCV_MAT_AUG_OPERATOR  (&=, cv::bitwise_and(a,b,a), Mat, Scalar)\nCV_MAT_AUG_OPERATOR_T(&=, 
cv::bitwise_and(a,b,a), Mat_<_Tp>, Mat)\nCV_MAT_AUG_OPERATOR_T(&=, cv::bitwise_and(a,b,a), Mat_<_Tp>, Scalar)\nCV_MAT_AUG_OPERATOR_T(&=, cv::bitwise_and(a,b,a), Mat_<_Tp>, Mat_<_Tp>)\n\nCV_MAT_AUG_OPERATOR  (|=, cv::bitwise_or(a,b,a), Mat, Mat)\nCV_MAT_AUG_OPERATOR  (|=, cv::bitwise_or(a,b,a), Mat, Scalar)\nCV_MAT_AUG_OPERATOR_T(|=, cv::bitwise_or(a,b,a), Mat_<_Tp>, Mat)\nCV_MAT_AUG_OPERATOR_T(|=, cv::bitwise_or(a,b,a), Mat_<_Tp>, Scalar)\nCV_MAT_AUG_OPERATOR_T(|=, cv::bitwise_or(a,b,a), Mat_<_Tp>, Mat_<_Tp>)\n\nCV_MAT_AUG_OPERATOR  (^=, cv::bitwise_xor(a,b,a), Mat, Mat)\nCV_MAT_AUG_OPERATOR  (^=, cv::bitwise_xor(a,b,a), Mat, Scalar)\nCV_MAT_AUG_OPERATOR_T(^=, cv::bitwise_xor(a,b,a), Mat_<_Tp>, Mat)\nCV_MAT_AUG_OPERATOR_T(^=, cv::bitwise_xor(a,b,a), Mat_<_Tp>, Scalar)\nCV_MAT_AUG_OPERATOR_T(^=, cv::bitwise_xor(a,b,a), Mat_<_Tp>, Mat_<_Tp>)\n\n#undef CV_MAT_AUG_OPERATOR_T\n#undef CV_MAT_AUG_OPERATOR\n#undef CV_MAT_AUG_OPERATOR1\n\n\n\n///////////////////////////////////////////// SVD /////////////////////////////////////////////\n\ninline SVD::SVD() {}\ninline SVD::SVD( InputArray m, int flags ) { operator ()(m, flags); }\ninline void SVD::solveZ( InputArray m, OutputArray _dst )\n{\n    Mat mtx = m.getMat();\n    SVD svd(mtx, (mtx.rows >= mtx.cols ? 0 : SVD::FULL_UV));\n    _dst.create(svd.vt.cols, 1, svd.vt.type());\n    Mat dst = _dst.getMat();\n    svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst);\n}\n\ntemplate<typename _Tp, int m, int n, int nm> inline void\n    SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt )\n{\n    CV_StaticAssert( nm == MIN(m, n), \"Invalid size of output vector.\");\n    Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false);\n    SVD::compute(_a, _w, _u, _vt);\n    CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]);\n}\n\ntemplate<typename _Tp, int m, int n, int nm> inline void\nSVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w )\n{\n    CV_StaticAssert( nm == MIN(m, n), \"Invalid size of output vector.\");\n    Mat _a(a, false), _w(w, false);\n    SVD::compute(_a, _w);\n    CV_Assert(_w.data == (uchar*)&w.val[0]);\n}\n\ntemplate<typename _Tp, int m, int n, int nm, int nb> inline void\nSVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u,\n                const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs,\n                Matx<_Tp, n, nb>& dst )\n{\n    CV_StaticAssert( nm == MIN(m, n), \"Invalid size of output vector.\");\n    Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false);\n    SVD::backSubst(_w, _u, _vt, _rhs, _dst);\n    CV_Assert(_dst.data == (uchar*)&dst.val[0]);\n}\n\n\n\n/////////////////////////////////// Multiply-with-Carry RNG ///////////////////////////////////\n\ninline RNG::RNG()              { state = 0xffffffff; }\ninline RNG::RNG(uint64 _state) { state = _state ? 
_state : 0xffffffff; }\n\ninline RNG::operator uchar()    { return (uchar)next(); }\ninline RNG::operator schar()    { return (schar)next(); }\ninline RNG::operator ushort()   { return (ushort)next(); }\ninline RNG::operator short()    { return (short)next(); }\ninline RNG::operator int()      { return (int)next(); }\ninline RNG::operator unsigned() { return next(); }\ninline RNG::operator float()    { return next()*2.3283064365386962890625e-10f; }\ninline RNG::operator double()   { unsigned t = next(); return (((uint64)t << 32) | next()) * 5.4210108624275221700372640043497e-20; }\n\ninline unsigned RNG::operator ()(unsigned N) { return (unsigned)uniform(0,N); }\ninline unsigned RNG::operator ()()           { return next(); }\n\ninline int    RNG::uniform(int a, int b)       { return a == b ? a : (int)(next() % (b - a) + a); }\ninline float  RNG::uniform(float a, float b)   { return ((float)*this)*(b - a) + a; }\ninline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; }\n\ninline unsigned RNG::next()\n{\n    state = (uint64)(unsigned)state* /*CV_RNG_COEFF*/ 4164903690U + (unsigned)(state >> 32);\n    return (unsigned)state;\n}\n\n//! returns the next uniformly-distributed random number of the specified type\ntemplate<typename _Tp> static inline _Tp randu()\n{\n  return (_Tp)theRNG();\n}\n\n///////////////////////////////// Formatted string generation /////////////////////////////////\n\nCV_EXPORTS String format( const char* fmt, ... );\n\n///////////////////////////////// Formatted output of cv::Mat /////////////////////////////////\n\nstatic inline\nPtr<Formatted> format(InputArray mtx, int fmt)\n{\n    return Formatter::get(fmt)->format(mtx.getMat());\n}\n\nstatic inline\nint print(Ptr<Formatted> fmtd, FILE* stream = stdout)\n{\n    int written = 0;\n    fmtd->reset();\n    for(const char* str = fmtd->next(); str; str = fmtd->next())\n        written += fputs(str, stream);\n\n    return written;\n}\n\nstatic inline\nint print(const Mat& mtx, FILE* stream = stdout)\n{\n    return print(Formatter::get()->format(mtx), stream);\n}\n\nstatic inline\nint print(const UMat& mtx, FILE* stream = stdout)\n{\n    return print(Formatter::get()->format(mtx.getMat(ACCESS_READ)), stream);\n}\n\ntemplate<typename _Tp> static inline\nint print(const std::vector<Point_<_Tp> >& vec, FILE* stream = stdout)\n{\n    return print(Formatter::get()->format(Mat(vec)), stream);\n}\n\ntemplate<typename _Tp> static inline\nint print(const std::vector<Point3_<_Tp> >& vec, FILE* stream = stdout)\n{\n    return print(Formatter::get()->format(Mat(vec)), stream);\n}\n\ntemplate<typename _Tp, int m, int n> static inline\nint print(const Matx<_Tp, m, n>& matx, FILE* stream = stdout)\n{\n    return print(Formatter::get()->format(cv::Mat(matx)), stream);\n}\n\n//! @endcond\n\n/****************************************************************************************\\\n*                                  Auxiliary algorithms                                  *\n\\****************************************************************************************/\n\n/** @brief Splits an element set into equivalency classes.\n\nThe generic function partition implements an \\f$O(N^2)\\f$ algorithm for splitting a set of \\f$N\\f$ elements\ninto one or more equivalency classes, as described in\n<http://en.wikipedia.org/wiki/Disjoint-set_data_structure> . The function returns the number of\nequivalency classes.\n@param _vec Set of elements stored as a vector.\n@param labels Output vector of labels. 
It contains as many elements as vec. Each label labels[i] is\na 0-based cluster index of `vec[i]`.\n@param predicate Equivalence predicate (pointer to a boolean function of two arguments or an\ninstance of the class that has the method bool operator()(const _Tp& a, const _Tp& b) ). The\npredicate returns true when the elements are certainly in the same class, and returns false if they\nmay or may not be in the same class.\n@ingroup core_cluster\n*/\ntemplate<typename _Tp, class _EqPredicate> int\npartition( const std::vector<_Tp>& _vec, std::vector<int>& labels,\n          _EqPredicate predicate=_EqPredicate())\n{\n    int i, j, N = (int)_vec.size();\n    const _Tp* vec = &_vec[0];\n\n    const int PARENT=0;\n    const int RANK=1;\n\n    std::vector<int> _nodes(N*2);\n    int (*nodes)[2] = (int(*)[2])&_nodes[0];\n\n    // The first O(N) pass: create N single-vertex trees\n    for(i = 0; i < N; i++)\n    {\n        nodes[i][PARENT]=-1;\n        nodes[i][RANK] = 0;\n    }\n\n    // The main O(N^2) pass: merge connected components\n    for( i = 0; i < N; i++ )\n    {\n        int root = i;\n\n        // find root\n        while( nodes[root][PARENT] >= 0 )\n            root = nodes[root][PARENT];\n\n        for( j = 0; j < N; j++ )\n        {\n            if( i == j || !predicate(vec[i], vec[j]))\n                continue;\n            int root2 = j;\n\n            while( nodes[root2][PARENT] >= 0 )\n                root2 = nodes[root2][PARENT];\n\n            if( root2 != root )\n            {\n                // unite both trees\n                int rank = nodes[root][RANK], rank2 = nodes[root2][RANK];\n                if( rank > rank2 )\n                    nodes[root2][PARENT] = root;\n                else\n                {\n                    nodes[root][PARENT] = root2;\n                    nodes[root2][RANK] += rank == rank2;\n                    root = root2;\n                }\n                CV_Assert( nodes[root][PARENT] < 0 );\n\n                int k = j, parent;\n\n                // compress the path from node2 to root\n                while( (parent = nodes[k][PARENT]) >= 0 )\n                {\n                    nodes[k][PARENT] = root;\n                    k = parent;\n                }\n\n                // compress the path from node to root\n                k = i;\n                while( (parent = nodes[k][PARENT]) >= 0 )\n                {\n                    nodes[k][PARENT] = root;\n                    k = parent;\n                }\n            }\n        }\n    }\n\n    // Final O(N) pass: enumerate classes\n    labels.resize(N);\n    int nclasses = 0;\n\n    for( i = 0; i < N; i++ )\n    {\n        int root = i;\n        while( nodes[root][PARENT] >= 0 )\n            root = nodes[root][PARENT];\n        // re-use the rank as the class label\n        if( nodes[root][RANK] >= 0 )\n            nodes[root][RANK] = ~nclasses++;\n        labels[i] = ~nodes[root][RANK];\n    }\n\n    return nclasses;\n}\n\n} // cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/optim.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the OpenCV Foundation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OPTIM_HPP__\n#define __OPENCV_OPTIM_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n\n/** @addtogroup core_optim\nThe algorithms in this section minimize or maximize function value within specified constraints or\nwithout any constraints.\n@{\n*/\n\n/** @brief Basic interface for all solvers\n */\nclass CV_EXPORTS MinProblemSolver : public Algorithm\n{\npublic:\n    /** @brief Represents function being optimized\n     */\n    class CV_EXPORTS Function\n    {\n    public:\n        virtual ~Function() {}\n        virtual int getDims() const = 0;\n        virtual double getGradientEps() const;\n        virtual double calc(const double* x) const = 0;\n        virtual void getGradient(const double* x,double* grad);\n    };\n\n    /** @brief Getter for the optimized function.\n\n    The optimized function is represented by Function interface, which requires derivatives to\n    implement the sole method calc(double*) to evaluate the function.\n\n    @return Smart-pointer to an object that implements Function interface - it represents the\n    function that is being optimized. 
It can be empty if no function was given so far.\n     */\n    virtual Ptr<Function> getFunction() const = 0;\n\n    /** @brief Setter for the optimized function.\n\n    *It should be called at least once before the call to* minimize(), as the default value is not usable.\n\n    @param f The new function to optimize.\n     */\n    virtual void setFunction(const Ptr<Function>& f) = 0;\n\n    /** @brief Getter for the previously set terminal criteria for this algorithm.\n\n    @return Deep copy of the terminal criteria used at the moment.\n     */\n    virtual TermCriteria getTermCriteria() const = 0;\n\n    /** @brief Set terminal criteria for solver.\n\n    It *is not necessary* to call this method before the first call to minimize(), as the default\n    value is sensible.\n\n    The algorithm stops when the number of function evaluations done exceeds termcrit.maxCount, when\n    the function values at the vertices of the simplex are within termcrit.epsilon range, or the simplex\n    becomes so small that it can be enclosed in a box with termcrit.epsilon sides, whichever comes\n    first.\n    @param termcrit Terminal criteria to be used, represented as a cv::TermCriteria structure.\n     */\n    virtual void setTermCriteria(const TermCriteria& termcrit) = 0;\n\n    /** @brief actually runs the algorithm and performs the minimization.\n\n    The sole input parameter determines the centroid of the starting simplex (roughly, it tells\n    where to start), all the others (terminal criteria, initial step, function to be minimized) are\n    supposed to be set via the setters before the call to this method; otherwise, the default values (not\n    always sensible) will be used.\n\n    @param x The initial point that will become the centroid of the initial simplex. After the algorithm\n    terminates, it will be set to the point where the algorithm stops, the point of a possible\n    minimum.\n    @return The value of the function at the point found.\n     */\n    virtual double minimize(InputOutputArray x) = 0;\n};\n\n/** @brief This class is used to perform the non-linear non-constrained minimization of a function,\n\ndefined on an `n`-dimensional Euclidean space, using the **Nelder-Mead method**, also known as\n**downhill simplex method**. The basic idea about the method can be obtained from\n<http://en.wikipedia.org/wiki/Nelder-Mead_method>.\n\nIt should be noted that this method, although deterministic, is rather a heuristic and therefore\nmay converge to a local minimum, not necessarily a global one. It is an iterative optimization technique,\nwhich at each step uses information about the values of the function evaluated only at `n+1`\npoints, arranged as a *simplex* in `n`-dimensional space (hence the second name of the method). 
At\neach step a new point is chosen to evaluate the function at; the obtained value is compared with the previous\nones, and based on this information the simplex changes its shape, slowly moving to the local minimum.\nThus this method uses *only* function values to make decisions, in contrast to, say, the Nonlinear\nConjugate Gradient method (which is also implemented in optim).\n\nThe algorithm stops when the number of function evaluations done exceeds termcrit.maxCount, when the\nfunction values at the vertices of the simplex are within termcrit.epsilon range, or the simplex becomes so\nsmall that it can be enclosed in a box with termcrit.epsilon sides, whichever comes first, for some\nuser-defined positive integer termcrit.maxCount and positive termcrit.epsilon.\n\n@note DownhillSolver is a derivative of the abstract interface\ncv::MinProblemSolver, which in turn is derived from the Algorithm interface and is used to\nencapsulate the functionality common to all non-linear optimization algorithms in the optim\nmodule.\n\n@note The term criteria should meet the following condition:\n@code\n    termcrit.type == (TermCriteria::MAX_ITER + TermCriteria::EPS) && termcrit.epsilon > 0 && termcrit.maxCount > 0\n@endcode\n */\nclass CV_EXPORTS DownhillSolver : public MinProblemSolver\n{\npublic:\n    /** @brief Returns the initial step that will be used in the downhill simplex algorithm.\n\n    @param step Initial step that will be used in the algorithm. Note that although the corresponding setter\n    accepts column-vectors as well as row-vectors, this method will return a row-vector.\n    @see DownhillSolver::setInitStep\n     */\n    virtual void getInitStep(OutputArray step) const=0;\n\n    /** @brief Sets the initial step that will be used in the downhill simplex algorithm.\n\n    The step, together with the initial point (given in DownhillSolver::minimize), are two `n`-dimensional\n    vectors that are used to determine the shape of the initial simplex. Roughly said, the initial point\n    determines the position of the simplex (it will become the simplex's centroid), while the step determines the\n    spread (size in each dimension) of the simplex. To be more precise, if \\f$s,x_0\\in\\mathbb{R}^n\\f$ are\n    the initial step and initial point respectively, the vertices of the simplex will be:\n    \\f$v_0:=x_0-\\frac{1}{2} s\\f$ and \\f$v_i:=x_0+s_i\\f$ for \\f$i=1,2,\\dots,n\\f$ where \\f$s_i\\f$ denotes\n    the projection of the initial step onto the *i*-th coordinate (the result of the projection is treated as the\n    vector given by \\f$s_i:=e_i\\cdot\\left<e_i\\cdot s\\right>\\f$, where \\f$e_i\\f$ form the canonical basis)\n\n    @param step Initial step that will be used in the algorithm. Roughly said, it determines the spread\n    (size in each dimension) of the initial simplex.\n     */\n    virtual void setInitStep(InputArray step)=0;\n\n    /** @brief This function returns the reference to the ready-to-use DownhillSolver object.\n\n    All the parameters are optional, so this procedure can be called even without parameters at\n    all. In this case, the default values will be used. As the default values for the terminal criteria are\n    the only sensible ones, MinProblemSolver::setFunction() and DownhillSolver::setInitStep()\n    should be called upon the obtained object, if the respective parameters were not given to\n    create(). 
Otherwise, the two ways (give parameters to createDownhillSolver() or miss them out\n    and call the MinProblemSolver::setFunction() and DownhillSolver::setInitStep()) are absolutely\n    equivalent (and will report the same errors in the same way, should invalid input be detected).\n    @param f Pointer to the function that will be minimized, similarly to the one you submit via\n    MinProblemSolver::setFunction.\n    @param initStep Initial step that will be used to construct the initial simplex, similarly to the one\n    you submit via MinProblemSolver::setInitStep.\n    @param termcrit Terminal criteria for the algorithm, similarly to the one you submit via\n    MinProblemSolver::setTermCriteria.\n     */\n    static Ptr<DownhillSolver> create(const Ptr<MinProblemSolver::Function>& f=Ptr<MinProblemSolver::Function>(),\n                                      InputArray initStep=Mat_<double>(1,1,0.0),\n                                      TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));\n};\n\n/** @brief This class is used to perform the non-linear non-constrained minimization of a function\nwith known gradient,\n\ndefined on an *n*-dimensional Euclidean space, using the **Nonlinear Conjugate Gradient method**.\nThe implementation is based on the beautifully clear explanatory article [An Introduction to\nthe Conjugate Gradient Method Without the Agonizing\nPain](http://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf) by Jonathan Richard\nShewchuk. The method can be seen as an adaptation of a standard Conjugate Gradient method (see, for\nexample <http://en.wikipedia.org/wiki/Conjugate_gradient_method>) for numerically solving\nsystems of linear equations.\n\nIt should be noted that this method, although deterministic, is rather a heuristic method and\ntherefore may converge to a local minimum, not necessarily a global one. What is even more disastrous,\nmost of its behaviour is ruled by the gradient, therefore it essentially cannot distinguish between\nlocal minima and maxima. Therefore, if it starts sufficiently near to a local maximum, it may\nconverge to it. Another obvious restriction is that it should be possible to compute the gradient of\na function at any point, thus it is preferable to have an analytic expression for the gradient, and the\ncomputational burden should be borne by the user.\n\nThe latter responsibility is accomplished via the getGradient method of the\nMinProblemSolver::Function interface (which represents the function being optimized). This method takes\na point in *n*-dimensional space (the first argument represents the array of coordinates of that\npoint) and computes its gradient (it should be stored in the second argument as an array).\n\n@note class ConjGradSolver thus does not add any new methods to the basic MinProblemSolver interface.\n\n@note The term criteria should meet one of the following conditions:\n@code\n    termcrit.type == (TermCriteria::MAX_ITER + TermCriteria::EPS) && termcrit.epsilon > 0 && termcrit.maxCount > 0\n    // or\n    (termcrit.type == TermCriteria::MAX_ITER) && termcrit.maxCount > 0\n@endcode\n */\nclass CV_EXPORTS ConjGradSolver : public MinProblemSolver\n{\npublic:\n    /** @brief This function returns the reference to the ready-to-use ConjGradSolver object.\n\n    All the parameters are optional, so this procedure can be called even without parameters at\n    all. In this case, the default values will be used. 
@note class ConjGradSolver thus does not add any new methods to the basic MinProblemSolver interface.\n\n@note the term criteria should meet one of the following conditions:\n@code\n    termcrit.type == (TermCriteria::MAX_ITER + TermCriteria::EPS) && termcrit.epsilon > 0 && termcrit.maxCount > 0\n    // or\n    (termcrit.type == TermCriteria::MAX_ITER) && termcrit.maxCount > 0\n@endcode\n */\nclass CV_EXPORTS ConjGradSolver : public MinProblemSolver\n{\npublic:\n    /** @brief This function returns the reference to the ready-to-use ConjGradSolver object.\n\n    All the parameters are optional, so this procedure can be called even without parameters at\n    all. In this case, the default values will be used. As the default values for the terminal criteria are\n    the only sensible ones, MinProblemSolver::setFunction() should be called on the obtained\n    object if the function was not given to create(). Otherwise, the two ways (submit it to\n    create() or omit it and call MinProblemSolver::setFunction()) are absolutely equivalent\n    (and will raise the same errors in the same way, should invalid input be detected).\n    @param f Pointer to the function that will be minimized, similarly to the one you submit via\n    MinProblemSolver::setFunction.\n    @param termcrit Terminal criteria for the algorithm, similarly to the one you submit via\n    MinProblemSolver::setTermCriteria.\n    */\n    static Ptr<ConjGradSolver> create(const Ptr<MinProblemSolver::Function>& f=Ptr<ConjGradSolver::Function>(),\n                                      TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001));\n};\n\n//! return codes for cv::solveLP() function\nenum SolveLPResult\n{\n    SOLVELP_UNBOUNDED    = -2, //!< problem is unbounded (target function can achieve arbitrarily high values)\n    SOLVELP_UNFEASIBLE    = -1, //!< problem is infeasible (there are no points that satisfy all the constraints imposed)\n    SOLVELP_SINGLE    = 0, //!< there is only one maximum for the target function\n    SOLVELP_MULTI    = 1 //!< there are multiple maxima for the target function - an arbitrary one is returned\n};\n\n/** @brief Solve the given (non-integer) linear programming problem using the Simplex Algorithm (Simplex Method).\n\nWhat we mean here by \"linear programming problem\" (or LP problem, for short) can be formulated as:\n\n\f[\mbox{Maximize } c\cdot x\\\n \mbox{Subject to:}\\\n Ax\leq b\\\n x\geq 0\f]\n\nwhere \f$c\f$ is a fixed `1`-by-`n` row-vector, \f$A\f$ is a fixed `m`-by-`n` matrix, \f$b\f$ is a fixed `m`-by-`1`\ncolumn vector and \f$x\f$ is an arbitrary `n`-by-`1` column vector that satisfies the constraints.\n\nThe simplex algorithm is one of many algorithms designed to handle this sort of problem\nefficiently. Although it is not optimal in a theoretical sense (there exist algorithms that can solve\nany problem written as above in polynomial time, while the simplex method degenerates to exponential\ntime on some special cases), it is well-studied, easy to implement and has been shown to work well for\nreal-life purposes.\n\nThe particular implementation is taken almost verbatim from **Introduction to Algorithms, third\nedition** by T. H. Cormen, C. E. Leiserson, R. L. Rivest and Clifford Stein. In particular,\nBland's rule <http://en.wikipedia.org/wiki/Bland%27s_rule> is used to prevent cycling.\n\n@param Func This row-vector corresponds to \f$c\f$ in the LP problem formulation (see above). It should\ncontain 32- or 64-bit floating point numbers. As a convenience, a column-vector may also be submitted;\nin the latter case it is understood to correspond to \f$c^T\f$.\n@param Constr `m`-by-`n+1` matrix, whose rightmost column corresponds to \f$b\f$ in the formulation above\nand the remaining columns to \f$A\f$. It should contain 32- or 64-bit floating point numbers.\n@param z The solution will be returned here as a column-vector - it corresponds to \f$x\f$ in the\nformulation above. It will contain 64-bit floating point numbers.\n@return One of cv::SolveLPResult\n
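\nFor example (an editorial sketch of the classic instance from the book cited above; treat the\nexact numbers as illustrative):\n@code\n    Mat Func = (Mat_<double>(1, 3) << 3, 1, 2);\n    Mat Constr = (Mat_<double>(3, 4) << 1, 1, 3, 30,\n                                        2, 2, 5, 24,\n                                        4, 1, 2, 36);\n    Mat z;\n    int res = solveLP(Func, Constr, z); // expected: res == SOLVELP_SINGLE, z = (8, 4, 0)\n@endcode\n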
 */\nCV_EXPORTS_W int solveLP(const Mat& Func, const Mat& Constr, Mat& z);\n\n//! @}\n\n}// cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/persistence.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_PERSISTENCE_HPP__\n#define __OPENCV_CORE_PERSISTENCE_HPP__\n\n#ifndef __cplusplus\n#  error persistence.hpp header must be compiled as C++\n#endif\n\n//! @addtogroup core_c\n//! @{\n\n/** @brief \"black box\" representation of the file storage associated with a file on disk.\n\nSeveral functions that are described below take CvFileStorage\\* as inputs and allow the user to\nsave or to load hierarchical collections that consist of scalar values, standard CXCore objects\n(such as matrices, sequences, graphs), and user-defined objects.\n\nOpenCV can read and write data in XML (<http://www.w3c.org/XML>) or YAML (<http://www.yaml.org>)\nformats. Below is an example of 3x3 floating-point identity matrix A, stored in XML and YAML files\nusing CXCore functions:\nXML:\n@code{.xml}\n    <?xml version=\"1.0\">\n    <opencv_storage>\n    <A type_id=\"opencv-matrix\">\n      <rows>3</rows>\n      <cols>3</cols>\n      <dt>f</dt>\n      <data>1. 0. 0. 0. 1. 0. 0. 0. 
1.</data>\n    </A>\n    </opencv_storage>\n@endcode\nYAML:\n@code{.yaml}\n    %YAML:1.0\n    A: !!opencv-matrix\n      rows: 3\n      cols: 3\n      dt: f\n      data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1.]\n@endcode\nAs can be seen from the examples, XML uses nested tags to represent hierarchy, while YAML uses\nindentation for that purpose (similar to the Python programming language).\n\nThe same functions can read and write data in both formats; the particular format is determined by\nthe extension of the opened file, \".xml\" for XML files and \".yml\" or \".yaml\" for YAML.\n */\ntypedef struct CvFileStorage CvFileStorage;\ntypedef struct CvFileNode CvFileNode;\n\n//! @} core_c\n\n#include \"opencv2/core/types.hpp\"\n#include \"opencv2/core/mat.hpp\"\n\nnamespace cv {\n\n/** @addtogroup core_xml\n\nXML/YAML file storages.     {#xml_storage}\n=======================\nWriting to a file storage.\n--------------------------\nYou can store and then restore various OpenCV data structures to/from XML (<http://www.w3c.org/XML>)\nor YAML (<http://www.yaml.org>) formats. It is also possible to store and load arbitrarily complex\ndata structures, which include OpenCV data structures, as well as primitive data types (integer and\nfloating-point numbers and text strings) as their elements.\n\nUse the following procedure to write something to XML or YAML:\n-# Create a new FileStorage and open it for writing. This can be done with a single call to the\nFileStorage::FileStorage constructor that takes a filename, or you can use the default constructor\nand then call FileStorage::open. The format of the file (XML or YAML) is determined from the filename\nextension (\".xml\" and \".yml\"/\".yaml\", respectively).\n-# Write all the data you want using the streaming operator `<<`, just like in the case of STL\nstreams.\n-# Close the file using FileStorage::release. The FileStorage destructor also closes the file.\n\nHere is an example:\n@code\n    #include \"opencv2/opencv.hpp\"\n    #include <time.h>\n\n    using namespace cv;\n\n    int main()\n    {\n        FileStorage fs(\"test.yml\", FileStorage::WRITE);\n\n        fs << \"frameCount\" << 5;\n        time_t rawtime; time(&rawtime);\n        fs << \"calibrationDate\" << asctime(localtime(&rawtime));\n        Mat cameraMatrix = (Mat_<double>(3,3) << 1000, 0, 320, 0, 1000, 240, 0, 0, 1);\n        Mat distCoeffs = (Mat_<double>(5,1) << 0.1, 0.01, -0.001, 0, 0);\n        fs << \"cameraMatrix\" << cameraMatrix << \"distCoeffs\" << distCoeffs;\n        fs << \"features\" << \"[\";\n        for( int i = 0; i < 3; i++ )\n        {\n            int x = rand() % 640;\n            int y = rand() % 480;\n            uchar lbp = rand() % 256;\n\n            fs << \"{:\" << \"x\" << x << \"y\" << y << \"lbp\" << \"[:\";\n            for( int j = 0; j < 8; j++ )\n                fs << ((lbp >> j) & 1);\n            fs << \"]\" << \"}\";\n        }\n        fs << \"]\";\n        fs.release();\n        return 0;\n    }\n@endcode\nThe sample above stores to YAML an integer, a text string (the calibration date), 2 matrices, and a custom\nstructure \"feature\", which includes feature coordinates and an LBP (local binary pattern) value. Here\nis the output of the sample:\n@code{.yaml}\n%YAML:1.0\nframeCount: 5\ncalibrationDate: \"Fri Jun 17 14:09:29 2011\\n\"\ncameraMatrix: !!opencv-matrix\n   rows: 3\n   cols: 3\n   dt: d\n   data: [ 1000., 0., 320., 0., 1000., 240., 0., 0., 1. 
]\ndistCoeffs: !!opencv-matrix\n   rows: 5\n   cols: 1\n   dt: d\n   data: [ 1.0000000000000001e-01, 1.0000000000000000e-02,\n       -1.0000000000000000e-03, 0., 0. ]\nfeatures:\n   - { x:167, y:49, lbp:[ 1, 0, 0, 1, 1, 0, 1, 1 ] }\n   - { x:298, y:130, lbp:[ 0, 0, 0, 1, 0, 0, 1, 1 ] }\n   - { x:344, y:158, lbp:[ 1, 1, 0, 0, 0, 0, 1, 0 ] }\n@endcode\n\nAs an exercise, you can replace \".yml\" with \".xml\" in the sample above and see how the\ncorresponding XML file will look.\n\nSeveral things can be noted by looking at the sample code and the output:\n\n-   The produced YAML (and XML) consists of heterogeneous collections that can be nested. There are 2\n    types of collections: named collections (mappings) and unnamed collections (sequences). In mappings\n    each element has a name and is accessed by name. This is similar to structures and std::map in\n    C/C++ and dictionaries in Python. In sequences elements do not have names, they are accessed by\n    indices. This is similar to arrays and std::vector in C/C++ and lists, tuples in Python.\n    \"Heterogeneous\" means that elements of each single collection can have different types.\n\n    The top-level collection in YAML/XML is a mapping. Each matrix is stored as a mapping, and the matrix\n    elements are stored as a sequence. Then, there is a sequence of features, where each feature is\n    represented as a mapping, and the lbp value is stored in a nested sequence.\n\n-   When you write to a mapping (a structure), you write the element name followed by its value. When you\n    write to a sequence, you simply write the elements one by one. OpenCV data structures (such as\n    cv::Mat) are written in absolutely the same way as simple C data structures - using the `<<`\n    operator.\n\n-   To write a mapping, you first write the special string `{` to the storage, then write the\n    elements as pairs (`fs << <element_name> << <element_value>`) and then write the closing\n    `}`.\n\n-   To write a sequence, you first write the special string `[`, then write the elements, then\n    write the closing `]`.\n\n-   In YAML (but not XML), mappings and sequences can be written in a compact Python-like inline\n    form. In the sample above the matrix elements, as well as each feature, including its lbp value, are\n    stored in such inline form. To store a mapping/sequence in a compact form, put `:` after the\n    opening character, e.g. use `{:` instead of `{` and `[:` instead of `[`. When the\n    data is written to XML, those extra `:` are ignored.\n\nReading data from a file storage.\n---------------------------------\nTo read the previously written XML or YAML file, do the following:\n-#  Open the file storage using the FileStorage::FileStorage constructor or the FileStorage::open method.\n    In the current implementation the whole file is parsed and the whole representation of the file\n    storage is built in memory as a hierarchy of file nodes (see FileNode).\n\n-#  Read the data you are interested in. 
Use FileStorage::operator [], FileNode::operator []\n    and/or FileNodeIterator.\n\n-#  Close the storage using FileStorage::release.\n\nHere is how to read the file created by the code sample above:\n@code\n    FileStorage fs2(\"test.yml\", FileStorage::READ);\n\n    // first method: use (type) operator on FileNode.\n    int frameCount = (int)fs2[\"frameCount\"];\n\n    String date;\n    // second method: use FileNode::operator >>\n    fs2[\"calibrationDate\"] >> date;\n\n    Mat cameraMatrix2, distCoeffs2;\n    fs2[\"cameraMatrix\"] >> cameraMatrix2;\n    fs2[\"distCoeffs\"] >> distCoeffs2;\n\n    cout << \"frameCount: \" << frameCount << endl\n         << \"calibration date: \" << date << endl\n         << \"camera matrix: \" << cameraMatrix2 << endl\n         << \"distortion coeffs: \" << distCoeffs2 << endl;\n\n    FileNode features = fs2[\"features\"];\n    FileNodeIterator it = features.begin(), it_end = features.end();\n    int idx = 0;\n    std::vector<uchar> lbpval;\n\n    // iterate through a sequence using FileNodeIterator\n    for( ; it != it_end; ++it, idx++ )\n    {\n        cout << \"feature #\" << idx << \": \";\n        cout << \"x=\" << (int)(*it)[\"x\"] << \", y=\" << (int)(*it)[\"y\"] << \", lbp: (\";\n        // you can also easily read numerical arrays using FileNode >> std::vector operator.\n        (*it)[\"lbp\"] >> lbpval;\n        for( int i = 0; i < (int)lbpval.size(); i++ )\n            cout << \" \" << (int)lbpval[i];\n        cout << \")\" << endl;\n    }\n    fs2.release();\n@endcode\n\nFormat specification    {#format_spec}\n--------------------\n`([count]{u|c|w|s|i|f|d})`... where the characters correspond to fundamental C++ types:\n-   `u` 8-bit unsigned number\n-   `c` 8-bit signed number\n-   `w` 16-bit unsigned number\n-   `s` 16-bit signed number\n-   `i` 32-bit signed number\n-   `f` single precision floating-point number\n-   `d` double precision floating-point number\n-   `r` pointer, 32 lower bits of which are written as a signed integer. The type can be used to\n    store structures with links between the elements.\n\n`count` is the optional counter of values of a given type. For example, `2if` means that each array\nelement is a structure of 2 integers, followed by a single-precision floating-point number. The\nequivalent notations of the above specification are `iif`, `2i1f` and so forth. Other examples: `u`\nmeans that the array consists of bytes, and `2d` means the array consists of pairs of doubles.\n\n@see @ref filestorage.cpp\n*/\n\n//! @{\n\n/** @example filestorage.cpp\nA complete example using the FileStorage interface\n*/\n\n////////////////////////// XML & YAML I/O //////////////////////////\n\nclass CV_EXPORTS FileNode;\nclass CV_EXPORTS FileNodeIterator;\n\n/** @brief XML/YAML file storage class that encapsulates all the information necessary for writing or reading\ndata to/from a file.\n */\nclass CV_EXPORTS_W FileStorage\n{\npublic:\n    //! 
file storage mode\n    enum Mode\n    {\n        READ        = 0, //!< value, open the file for reading\n        WRITE       = 1, //!< value, open the file for writing\n        APPEND      = 2, //!< value, open the file for appending\n        MEMORY      = 4, //!< flag, read data from source or write data to the internal buffer (which is\n                         //!< returned by FileStorage::release)\n        FORMAT_MASK = (7<<3), //!< mask for format flags\n        FORMAT_AUTO = 0,      //!< flag, auto format\n        FORMAT_XML  = (1<<3), //!< flag, XML format\n        FORMAT_YAML = (2<<3)  //!< flag, YAML format\n    };\n    enum\n    {\n        UNDEFINED      = 0,\n        VALUE_EXPECTED = 1,\n        NAME_EXPECTED  = 2,\n        INSIDE_MAP     = 4\n    };\n\n    /** @brief The constructors.\n\n    The full constructor opens the file. Alternatively you can use the default constructor and then\n    call FileStorage::open.\n     */\n    CV_WRAP FileStorage();\n\n    /** @overload\n    @param source Name of the file to open or the text string to read the data from. Extension of the\n    file (.xml or .yml/.yaml) determines its format (XML or YAML respectively). Also you can append .gz\n    to work with compressed files, for example myHugeMatrix.xml.gz. If both FileStorage::WRITE and\n    FileStorage::MEMORY flags are specified, source is used just to specify the output file format (e.g.\n    mydata.xml, .yml etc.).\n    @param flags Mode of operation. See FileStorage::Mode\n    @param encoding Encoding of the file. Note that UTF-16 XML encoding is not currently supported and\n    you should use 8-bit encoding instead.\n    */\n    CV_WRAP FileStorage(const String& source, int flags, const String& encoding=String());\n\n    /** @overload */\n    FileStorage(CvFileStorage* fs, bool owning=true);\n\n    //! the destructor; calls release()\n    virtual ~FileStorage();\n\n    /** @brief Opens a file.\n\n    See description of parameters in FileStorage::FileStorage. The method calls FileStorage::release\n    before opening the file.\n    @param filename Name of the file to open or the text string to read the data from.\n       Extension of the file (.xml or .yml/.yaml) determines its format (XML or YAML respectively).\n        Also you can append .gz to work with compressed files, for example myHugeMatrix.xml.gz. If both\n        FileStorage::WRITE and FileStorage::MEMORY flags are specified, source is used just to specify\n        the output file format (e.g. mydata.xml, .yml etc.).\n    @param flags Mode of operation. One of FileStorage::Mode\n    @param encoding Encoding of the file. Note that UTF-16 XML encoding is not currently supported and\n    you should use 8-bit encoding instead.\n     */\n    CV_WRAP virtual bool open(const String& filename, int flags, const String& encoding=String());\n\n    /** @brief Checks whether the file is opened.\n\n    @returns true if the object is associated with the current file and false otherwise. It is a\n    good practice to call this method after you have tried to open a file.\n     */\n    CV_WRAP virtual bool isOpened() const;\n\n
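    /* Editorial sketch (the file name \"config.xml\" below is illustrative): it is good\n       practice to check the result of open() or isOpened() before reading:\n\n           FileStorage fs;\n           if (!fs.open(\"config.xml\", FileStorage::READ))\n           {\n               // handle a missing or corrupted file here\n           }\n    */\n\n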
    /** @brief Closes the file and releases all the memory buffers.\n\n    Call this method after all I/O operations with the storage are finished.\n     */\n    CV_WRAP virtual void release();\n\n    /** @brief Closes the file and releases all the memory buffers.\n\n    Call this method after all I/O operations with the storage are finished. If the storage was\n    opened for writing data in memory (i.e. the FileStorage::MEMORY flag was specified), the method\n    returns the text string with the data written so far.\n     */\n    CV_WRAP virtual String releaseAndGetString();\n\n    /** @brief Returns the first element of the top-level mapping.\n    @returns The first element of the top-level mapping.\n     */\n    CV_WRAP FileNode getFirstTopLevelNode() const;\n\n    /** @brief Returns the top-level mapping\n    @param streamidx Zero-based index of the stream. In most cases there is only one stream in the file.\n    However, YAML supports multiple streams and so there can be several.\n    @returns The top-level mapping.\n     */\n    CV_WRAP FileNode root(int streamidx=0) const;\n\n    /** @brief Returns the specified element of the top-level mapping.\n    @param nodename Name of the file node.\n    @returns Node with the given name.\n     */\n    FileNode operator[](const String& nodename) const;\n\n    /** @overload */\n    CV_WRAP FileNode operator[](const char* nodename) const;\n\n    /** @brief Returns the obsolete C FileStorage structure.\n    @returns Pointer to the underlying C FileStorage structure\n     */\n    CvFileStorage* operator *() { return fs.get(); }\n\n    /** @overload */\n    const CvFileStorage* operator *() const { return fs.get(); }\n\n    /** @brief Writes multiple numbers.\n\n    Writes one or more numbers of the specified format to the currently written structure. Usually it is\n    more convenient to use operator `<<` instead of this method.\n    @param fmt Specification of each array element, see @ref format_spec \"format specification\"\n    @param vec Pointer to the written array.\n    @param len Number of uchar elements to write.\n     */\n    void writeRaw( const String& fmt, const uchar* vec, size_t len );\n\n    /** @brief Writes the registered C structure (CvMat, CvMatND, CvSeq).\n    @param name Name of the written object.\n    @param obj Pointer to the object.\n    @see ocvWrite for details.\n     */\n    void writeObj( const String& name, const void* obj );\n\n    /** @brief Returns the normalized object name for the specified name of a file.\n    @param filename Name of a file\n    @returns The normalized object name.\n     */\n    static String getDefaultObjectName(const String& filename);\n\n    Ptr<CvFileStorage> fs; //!< the underlying C FileStorage structure\n    String elname; //!< the currently written element\n    std::vector<char> structs; //!< the stack of written structures\n    int state; //!< the writer state\n};\n\ntemplate<> CV_EXPORTS void DefaultDeleter<CvFileStorage>::operator ()(CvFileStorage* obj) const;\n\n
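/* Editorial sketch: writing to an in-memory buffer instead of a file and collecting the\n   result with releaseAndGetString(); the \".yml\" format hint and the key name are illustrative:\n\n       FileStorage fs(\".yml\", FileStorage::WRITE + FileStorage::MEMORY);\n       fs << \"frameCount\" << 5;\n       String text = fs.releaseAndGetString(); // contains the YAML text written above\n*/\n\n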
/** @brief File Storage Node class.\n\nThe node is used to store each and every element of the file storage opened for reading. When an\nXML/YAML file is read, it is first parsed and stored in memory as a hierarchical collection of\nnodes. Each node can be a \"leaf\", that is, contain a single number or a string, or it can be a collection of\nother nodes. There can be named collections (mappings), where each element has a name and is\naccessed by name, and ordered collections (sequences), where elements do not have names but are\naccessed by index instead. The type of the file node can be determined using the FileNode::type method.\n\nNote that file nodes are only used for navigating file storages opened for reading. When a file\nstorage is opened for writing, no data is stored in memory after it is written.\n */\nclass CV_EXPORTS_W_SIMPLE FileNode\n{\npublic:\n    //! type of the file storage node\n    enum Type\n    {\n        NONE      = 0, //!< empty node\n        INT       = 1, //!< an integer\n        REAL      = 2, //!< floating-point number\n        FLOAT     = REAL, //!< synonym for REAL\n        STR       = 3, //!< text string in UTF-8 encoding\n        STRING    = STR, //!< synonym for STR\n        REF       = 4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others\n        SEQ       = 5, //!< sequence\n        MAP       = 6, //!< mapping\n        TYPE_MASK = 7,\n        FLOW      = 8,  //!< compact representation of a sequence or mapping. Used only by the YAML writer\n        USER      = 16, //!< a registered object (e.g. a matrix)\n        EMPTY     = 32, //!< empty structure (sequence or mapping)\n        NAMED     = 64  //!< the node has a name (i.e. it is an element of a mapping)\n    };\n    /** @brief The constructors.\n\n    These constructors are used to create a default file node, construct it from obsolete structures or\n    from another file node.\n     */\n    CV_WRAP FileNode();\n\n    /** @overload\n    @param fs Pointer to the obsolete file storage structure.\n    @param node File node to be used as initialization for the created file node.\n    */\n    FileNode(const CvFileStorage* fs, const CvFileNode* node);\n\n    /** @overload\n    @param node File node to be used as initialization for the created file node.\n    */\n    FileNode(const FileNode& node);\n\n    /** @brief Returns element of a mapping node or a sequence node.\n    @param nodename Name of an element in the mapping node.\n    @returns The element with the given identifier.\n     */\n    FileNode operator[](const String& nodename) const;\n\n    /** @overload\n    @param nodename Name of an element in the mapping node.\n    */\n    CV_WRAP FileNode operator[](const char* nodename) const;\n\n    /** @overload\n    @param i Index of an element in the sequence node.\n    */\n    CV_WRAP FileNode operator[](int i) const;\n\n    /** @brief Returns type of the node.\n    @returns Type of the node. See FileNode::Type\n     */\n    CV_WRAP int type() const;\n\n    //! returns true if the node is empty\n    CV_WRAP bool empty() const;\n    //! returns true if the node is a \"none\" object\n    CV_WRAP bool isNone() const;\n    //! returns true if the node is a sequence\n    CV_WRAP bool isSeq() const;\n    //! returns true if the node is a mapping\n    CV_WRAP bool isMap() const;\n    //! returns true if the node is an integer\n    CV_WRAP bool isInt() const;\n    //! returns true if the node is a floating-point number\n    CV_WRAP bool isReal() const;\n    //! returns true if the node is a text string\n    CV_WRAP bool isString() const;\n    //! returns true if the node has a name\n    CV_WRAP bool isNamed() const;\n    //! returns the node name or an empty string if the node is nameless\n    CV_WRAP String name() const;\n    //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise.\n    CV_WRAP size_t size() const;\n    //! returns the node content as an integer. If the node stores a floating-point number, it is rounded.\n    operator int() const;\n    //! returns the node content as float\n    operator float() const;\n    //! returns the node content as double\n    operator double() const;\n    //! returns the node content as text string\n    operator String() const;\n#ifndef OPENCV_NOSTL\n    operator std::string() const;\n#endif\n\n
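    /* Editorial sketch of checking a node's type before use (the \"features\" key is\n       illustrative, and fs is an opened FileStorage):\n\n           FileNode n = fs[\"features\"];\n           if (n.isSeq())\n               for (FileNodeIterator it = n.begin(); it != n.end(); ++it)\n               {\n                   // use (*it) here\n               }\n    */\n\n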
    //! returns pointer to the underlying file node\n    CvFileNode* operator *();\n    //! returns pointer to the underlying file node\n    const CvFileNode* operator* () const;\n\n    //! returns iterator pointing to the first node element\n    FileNodeIterator begin() const;\n    //! returns iterator pointing to the element following the last node element\n    FileNodeIterator end() const;\n\n    /** @brief Reads node elements to the buffer with the specified format.\n\n    Usually it is more convenient to use operator `>>` instead of this method.\n    @param fmt Specification of each array element. See @ref format_spec \"format specification\"\n    @param vec Pointer to the destination array.\n    @param len Number of elements to read. If it is greater than the number of remaining elements then all\n    of them will be read.\n     */\n    void readRaw( const String& fmt, uchar* vec, size_t len ) const;\n\n    //! reads the registered object and returns pointer to it\n    void* readObj() const;\n\n    // do not use wrapper pointer classes for better efficiency\n    const CvFileStorage* fs;\n    const CvFileNode* node;\n};\n\n\n/** @brief Used to iterate through sequences and mappings.\n\nA standard STL notation, with node.begin() and node.end() denoting the beginning and the end of the\nsequence stored in node. See the data reading sample at the beginning of the section.\n */\nclass CV_EXPORTS FileNodeIterator\n{\npublic:\n    /** @brief The constructors.\n\n    These constructors are used to create a default iterator, set it to a specific element in a file node\n    or construct it from another iterator.\n     */\n    FileNodeIterator();\n\n    /** @overload\n    @param fs File storage for the iterator.\n    @param node File node for the iterator.\n    @param ofs Index of the element in the node. The created iterator will point to this element.\n    */\n    FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0);\n\n    /** @overload\n    @param it Iterator to be used as initialization for the created iterator.\n    */\n    FileNodeIterator(const FileNodeIterator& it);\n\n    //! returns the currently observed element\n    FileNode operator *() const;\n    //! accesses methods of the currently observed element\n    FileNode operator ->() const;\n\n    //! moves iterator to the next node\n    FileNodeIterator& operator ++ ();\n    //! moves iterator to the next node\n    FileNodeIterator operator ++ (int);\n    //! moves iterator to the previous node\n    FileNodeIterator& operator -- ();\n    //! moves iterator to the previous node\n    FileNodeIterator operator -- (int);\n    //! moves iterator forward by the specified offset (possibly negative)\n    FileNodeIterator& operator += (int ofs);\n    //! moves iterator backward by the specified offset (possibly negative)\n    FileNodeIterator& operator -= (int ofs);\n\n    /** @brief Reads node elements to the buffer with the specified format.\n\n    Usually it is more convenient to use operator `>>` instead of this method.\n    @param fmt Specification of each array element. See @ref format_spec \"format specification\"\n    @param vec Pointer to the destination array.\n    @param maxCount Number of elements to read. 
If it is greater than the number of remaining elements then\n    all of them will be read.\n     */\n    FileNodeIterator& readRaw( const String& fmt, uchar* vec,\n                               size_t maxCount=(size_t)INT_MAX );\n\n    struct SeqReader\n    {\n      int          header_size;\n      void*        seq;        /* sequence being read; CvSeq       */\n      void*        block;      /* current block;        CvSeqBlock */\n      schar*       ptr;        /* pointer to the element to be read next */\n      schar*       block_min;  /* pointer to the beginning of the block */\n      schar*       block_max;  /* pointer to the end of the block */\n      int          delta_index;/* = seq->first->start_index   */\n      schar*       prev_elem;  /* pointer to the previous element */\n    };\n\n    const CvFileStorage* fs;\n    const CvFileNode* container;\n    SeqReader reader;\n    size_t remaining;\n};\n\n//! @} core_xml\n\n/////////////////// XML & YAML I/O implementation //////////////////\n\n//! @relates cv::FileStorage\n//! @{\n\nCV_EXPORTS void write( FileStorage& fs, const String& name, int value );\nCV_EXPORTS void write( FileStorage& fs, const String& name, float value );\nCV_EXPORTS void write( FileStorage& fs, const String& name, double value );\nCV_EXPORTS void write( FileStorage& fs, const String& name, const String& value );\nCV_EXPORTS void write( FileStorage& fs, const String& name, const Mat& value );\nCV_EXPORTS void write( FileStorage& fs, const String& name, const SparseMat& value );\nCV_EXPORTS void write( FileStorage& fs, const String& name, const std::vector<KeyPoint>& value);\nCV_EXPORTS void write( FileStorage& fs, const String& name, const std::vector<DMatch>& value);\n\nCV_EXPORTS void writeScalar( FileStorage& fs, int value );\nCV_EXPORTS void writeScalar( FileStorage& fs, float value );\nCV_EXPORTS void writeScalar( FileStorage& fs, double value );\nCV_EXPORTS void writeScalar( FileStorage& fs, const String& value );\n\n//! @}\n\n//! @relates cv::FileNode\n//! @{\n\nCV_EXPORTS void read(const FileNode& node, int& value, int default_value);\nCV_EXPORTS void read(const FileNode& node, float& value, float default_value);\nCV_EXPORTS void read(const FileNode& node, double& value, double default_value);\nCV_EXPORTS void read(const FileNode& node, String& value, const String& default_value);\nCV_EXPORTS void read(const FileNode& node, Mat& mat, const Mat& default_mat = Mat() );\nCV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat = SparseMat() );\nCV_EXPORTS void read(const FileNode& node, std::vector<KeyPoint>& keypoints);\nCV_EXPORTS void read(const FileNode& node, std::vector<DMatch>& matches);\n\ntemplate<typename _Tp> static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value)\n{\n    std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;\n    value = temp.size() != 2 ? default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]));\n}\n\ntemplate<typename _Tp> static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value)\n{\n    std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;\n    value = temp.size() != 3 ? 
default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]),\n                                                            saturate_cast<_Tp>(temp[2]));\n}\n\ntemplate<typename _Tp> static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value)\n{\n    std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;\n    value = temp.size() != 2 ? default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]));\n}\n\ntemplate<typename _Tp> static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value)\n{\n    std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;\n    value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]));\n}\n\ntemplate<typename _Tp> static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value)\n{\n    std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;\n    value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]),\n                                                          saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3]));\n}\n\ntemplate<typename _Tp, int cn> static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value)\n{\n    std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;\n    value = temp.size() != cn ? default_value : Vec<_Tp, cn>(&temp[0]);\n}\n\ntemplate<typename _Tp> static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value)\n{\n    std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;\n    value = temp.size() != 4 ? default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]),\n                                                            saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3]));\n}\n\nstatic inline void read(const FileNode& node, Range& value, const Range& default_value)\n{\n    Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end);\n    read(node, temp, default_temp);\n    value.start = temp.x; value.end = temp.y;\n}\n\n//! @}\n\n/** @brief Writes string to a file storage.\n@relates cv::FileStorage\n */\nCV_EXPORTS FileStorage& operator << (FileStorage& fs, const String& str);\n\n//! @cond IGNORED\n\nnamespace internal\n{\n    class CV_EXPORTS WriteStructContext\n    {\n    public:\n        WriteStructContext(FileStorage& _fs, const String& name, int flags, const String& typeName = String());\n        ~WriteStructContext();\n    private:\n        FileStorage* fs;\n    };\n\n    template<typename _Tp, int numflag> class VecWriterProxy\n    {\n    public:\n        VecWriterProxy( FileStorage* _fs ) : fs(_fs) {}\n        void operator()(const std::vector<_Tp>& vec) const\n        {\n            size_t count = vec.size();\n            for (size_t i = 0; i < count; i++)\n                write(*fs, vec[i]);\n        }\n    private:\n        FileStorage* fs;\n    };\n\n    template<typename _Tp> class VecWriterProxy<_Tp, 1>\n    {\n    public:\n        VecWriterProxy( FileStorage* _fs ) : fs(_fs) {}\n        void operator()(const std::vector<_Tp>& vec) const\n        {\n            int _fmt = DataType<_Tp>::fmt;\n            char fmt[] = { (char)((_fmt >> 8) + '1'), (char)_fmt, '\\0' };\n            fs->writeRaw(fmt, !vec.empty() ? 
(uchar*)&vec[0] : 0, vec.size() * sizeof(_Tp));\n        }\n    private:\n        FileStorage* fs;\n    };\n\n    template<typename _Tp, int numflag> class VecReaderProxy\n    {\n    public:\n        VecReaderProxy( FileNodeIterator* _it ) : it(_it) {}\n        void operator()(std::vector<_Tp>& vec, size_t count) const\n        {\n            count = std::min(count, it->remaining);\n            vec.resize(count);\n            for (size_t i = 0; i < count; i++, ++(*it))\n                read(**it, vec[i], _Tp());\n        }\n    private:\n        FileNodeIterator* it;\n    };\n\n    template<typename _Tp> class VecReaderProxy<_Tp, 1>\n    {\n    public:\n        VecReaderProxy( FileNodeIterator* _it ) : it(_it) {}\n        void operator()(std::vector<_Tp>& vec, size_t count) const\n        {\n            size_t remaining = it->remaining;\n            size_t cn = DataType<_Tp>::channels;\n            int _fmt = DataType<_Tp>::fmt;\n            char fmt[] = { (char)((_fmt >> 8)+'1'), (char)_fmt, '\\0' };\n            size_t remaining1 = remaining / cn;\n            count = count < remaining1 ? count : remaining1;\n            vec.resize(count);\n            it->readRaw(fmt, !vec.empty() ? (uchar*)&vec[0] : 0, count*sizeof(_Tp));\n        }\n    private:\n        FileNodeIterator* it;\n    };\n\n} // internal\n\n//! @endcond\n\n//! @relates cv::FileStorage\n//! @{\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const _Tp& value)\n{\n    write(fs, String(), value);\n}\n\ntemplate<> inline\nvoid write( FileStorage& fs, const int& value )\n{\n    writeScalar(fs, value);\n}\n\ntemplate<> inline\nvoid write( FileStorage& fs, const float& value )\n{\n    writeScalar(fs, value);\n}\n\ntemplate<> inline\nvoid write( FileStorage& fs, const double& value )\n{\n    writeScalar(fs, value);\n}\n\ntemplate<> inline\nvoid write( FileStorage& fs, const String& value )\n{\n    writeScalar(fs, value);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const Point_<_Tp>& pt )\n{\n    write(fs, pt.x);\n    write(fs, pt.y);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const Point3_<_Tp>& pt )\n{\n    write(fs, pt.x);\n    write(fs, pt.y);\n    write(fs, pt.z);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const Size_<_Tp>& sz )\n{\n    write(fs, sz.width);\n    write(fs, sz.height);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const Complex<_Tp>& c )\n{\n    write(fs, c.re);\n    write(fs, c.im);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const Rect_<_Tp>& r )\n{\n    write(fs, r.x);\n    write(fs, r.y);\n    write(fs, r.width);\n    write(fs, r.height);\n}\n\ntemplate<typename _Tp, int cn> static inline\nvoid write(FileStorage& fs, const Vec<_Tp, cn>& v )\n{\n    for(int i = 0; i < cn; i++)\n        write(fs, v.val[i]);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const Scalar_<_Tp>& s )\n{\n    write(fs, s.val[0]);\n    write(fs, s.val[1]);\n    write(fs, s.val[2]);\n    write(fs, s.val[3]);\n}\n\nstatic inline\nvoid write(FileStorage& fs, const Range& r )\n{\n    write(fs, r.start);\n    write(fs, r.end);\n}\n\ntemplate<typename _Tp> static inline\nvoid write( FileStorage& fs, const std::vector<_Tp>& vec )\n{\n    cv::internal::VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs);\n    w(vec);\n}\n\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const String& name, const Point_<_Tp>& pt )\n{\n    
cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, pt);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const String& name, const Point3_<_Tp>& pt )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, pt);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const String& name, const Size_<_Tp>& sz )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, sz);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const String& name, const Complex<_Tp>& c )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, c);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const String& name, const Rect_<_Tp>& r )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, r);\n}\n\ntemplate<typename _Tp, int cn> static inline\nvoid write(FileStorage& fs, const String& name, const Vec<_Tp, cn>& v )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, v);\n}\n\ntemplate<typename _Tp> static inline\nvoid write(FileStorage& fs, const String& name, const Scalar_<_Tp>& s )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, s);\n}\n\nstatic inline\nvoid write(FileStorage& fs, const String& name, const Range& r )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW);\n    write(fs, r);\n}\n\ntemplate<typename _Tp> static inline\nvoid write( FileStorage& fs, const String& name, const std::vector<_Tp>& vec )\n{\n    cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+(DataType<_Tp>::fmt != 0 ? FileNode::FLOW : 0));\n    write(fs, vec);\n}\n\n//! @} FileStorage\n\n//! @relates cv::FileNode\n//! @{\n\nstatic inline\nvoid read(const FileNode& node, bool& value, bool default_value)\n{\n    int temp;\n    read(node, temp, (int)default_value);\n    value = temp != 0;\n}\n\nstatic inline\nvoid read(const FileNode& node, uchar& value, uchar default_value)\n{\n    int temp;\n    read(node, temp, (int)default_value);\n    value = saturate_cast<uchar>(temp);\n}\n\nstatic inline\nvoid read(const FileNode& node, schar& value, schar default_value)\n{\n    int temp;\n    read(node, temp, (int)default_value);\n    value = saturate_cast<schar>(temp);\n}\n\nstatic inline\nvoid read(const FileNode& node, ushort& value, ushort default_value)\n{\n    int temp;\n    read(node, temp, (int)default_value);\n    value = saturate_cast<ushort>(temp);\n}\n\nstatic inline\nvoid read(const FileNode& node, short& value, short default_value)\n{\n    int temp;\n    read(node, temp, (int)default_value);\n    value = saturate_cast<short>(temp);\n}\n\ntemplate<typename _Tp> static inline\nvoid read( FileNodeIterator& it, std::vector<_Tp>& vec, size_t maxCount = (size_t)INT_MAX )\n{\n    cv::internal::VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);\n    r(vec, maxCount);\n}\n\ntemplate<typename _Tp> static inline\nvoid read( const FileNode& node, std::vector<_Tp>& vec, const std::vector<_Tp>& default_value = std::vector<_Tp>() )\n{\n    if(!node.node)\n        vec = default_value;\n    else\n    {\n        FileNodeIterator it = node.begin();\n        read( it, vec );\n    }\n}\n\n//! @} FileNode\n\n//! @relates cv::FileStorage\n//! 
@{\n\n/** @brief Writes data to a file storage.\n */\ntemplate<typename _Tp> static inline\nFileStorage& operator << (FileStorage& fs, const _Tp& value)\n{\n    if( !fs.isOpened() )\n        return fs;\n    if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP )\n        CV_Error( Error::StsError, \"No element name has been given\" );\n    write( fs, fs.elname, value );\n    if( fs.state & FileStorage::INSIDE_MAP )\n        fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP;\n    return fs;\n}\n\n/** @brief Writes data to a file storage.\n */\nstatic inline\nFileStorage& operator << (FileStorage& fs, const char* str)\n{\n    return (fs << String(str));\n}\n\n/** @brief Writes data to a file storage.\n */\nstatic inline\nFileStorage& operator << (FileStorage& fs, char* value)\n{\n    return (fs << String(value));\n}\n\n//! @} FileStorage\n\n//! @relates cv::FileNodeIterator\n//! @{\n\n/** @brief Reads data from a file storage.\n */\ntemplate<typename _Tp> static inline\nFileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value)\n{\n    read( *it, value, _Tp());\n    return ++it;\n}\n\n/** @brief Reads data from a file storage.\n */\ntemplate<typename _Tp> static inline\nFileNodeIterator& operator >> (FileNodeIterator& it, std::vector<_Tp>& vec)\n{\n    cv::internal::VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);\n    r(vec, (size_t)INT_MAX);\n    return it;\n}\n\n//! @} FileNodeIterator\n\n//! @relates cv::FileNode\n//! @{\n\n/** @brief Reads data from a file storage.\n */\ntemplate<typename _Tp> static inline\nvoid operator >> (const FileNode& n, _Tp& value)\n{\n    read( n, value, _Tp());\n}\n\n/** @brief Reads data from a file storage.\n */\ntemplate<typename _Tp> static inline\nvoid operator >> (const FileNode& n, std::vector<_Tp>& vec)\n{\n    FileNodeIterator it = n.begin();\n    it >> vec;\n}\n\n//! @} FileNode\n\n//! @relates cv::FileNodeIterator\n//! @{\n\nstatic inline\nbool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2)\n{\n    return it1.fs == it2.fs && it1.container == it2.container &&\n        it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining;\n}\n\nstatic inline\nbool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2)\n{\n    return !(it1 == it2);\n}\n\nstatic inline\nptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2)\n{\n    return it2.remaining - it1.remaining;\n}\n\nstatic inline\nbool operator < (const FileNodeIterator& it1, const FileNodeIterator& it2)\n{\n    return it1.remaining > it2.remaining;\n}\n\n//! @} FileNodeIterator\n\n//! @cond IGNORED\n\ninline FileNode FileStorage::getFirstTopLevelNode() const { FileNode r = root(); FileNodeIterator it = r.begin(); return it != r.end() ? 
*it : FileNode(); }\ninline FileNode::FileNode() : fs(0), node(0) {}\ninline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) : fs(_fs), node(_node) {}\ninline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {}\ninline bool FileNode::empty() const    { return node   == 0;    }\ninline bool FileNode::isNone() const   { return type() == NONE; }\ninline bool FileNode::isSeq() const    { return type() == SEQ;  }\ninline bool FileNode::isMap() const    { return type() == MAP;  }\ninline bool FileNode::isInt() const    { return type() == INT;  }\ninline bool FileNode::isReal() const   { return type() == REAL; }\ninline bool FileNode::isString() const { return type() == STR;  }\ninline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; }\ninline const CvFileNode* FileNode::operator* () const { return node; }\ninline FileNode::operator int() const    { int value;    read(*this, value, 0);     return value; }\ninline FileNode::operator float() const  { float value;  read(*this, value, 0.f);   return value; }\ninline FileNode::operator double() const { double value; read(*this, value, 0.);    return value; }\ninline FileNode::operator String() const { String value; read(*this, value, value); return value; }\ninline FileNodeIterator FileNode::begin() const { return FileNodeIterator(fs, node); }\ninline FileNodeIterator FileNode::end() const   { return FileNodeIterator(fs, node, size()); }\ninline void FileNode::readRaw( const String& fmt, uchar* vec, size_t len ) const { begin().readRaw( fmt, vec, len ); }\ninline FileNode FileNodeIterator::operator *() const  { return FileNode(fs, (const CvFileNode*)(const void*)reader.ptr); }\ninline FileNode FileNodeIterator::operator ->() const { return FileNode(fs, (const CvFileNode*)(const void*)reader.ptr); }\ninline String::String(const FileNode& fn): cstr_(0), len_(0) { read(fn, *this, *this); }\n\n//! @endcond\n\n} // cv\n\n#endif // __OPENCV_CORE_PERSISTENCE_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/private.cuda.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_PRIVATE_CUDA_HPP__\n#define __OPENCV_CORE_PRIVATE_CUDA_HPP__\n\n#ifndef __OPENCV_BUILD\n#  error this is a private header which should not be used from outside of the OpenCV library\n#endif\n\n#include \"cvconfig.h\"\n\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/base.hpp\"\n\n#include \"opencv2/core/cuda.hpp\"\n\n#ifdef HAVE_CUDA\n#  include <cuda.h>\n#  include <cuda_runtime.h>\n#  include <npp.h>\n#  include \"opencv2/core/cuda_stream_accessor.hpp\"\n#  include \"opencv2/core/cuda/common.hpp\"\n\n#  define NPP_VERSION (NPP_VERSION_MAJOR * 1000 + NPP_VERSION_MINOR * 100 + NPP_VERSION_BUILD)\n\n#  define CUDART_MINIMUM_REQUIRED_VERSION 4020\n\n#  if (CUDART_VERSION < CUDART_MINIMUM_REQUIRED_VERSION)\n#    error \"Insufficient Cuda Runtime library version, please update it.\"\n#  endif\n\n#  if defined(CUDA_ARCH_BIN_OR_PTX_10)\n#    error \"OpenCV CUDA module doesn't support NVIDIA compute capability 1.0\"\n#  endif\n#endif\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace cuda {\n    CV_EXPORTS cv::String getNppErrorMessage(int code);\n    CV_EXPORTS cv::String getCudaDriverApiErrorMessage(int code);\n\n    CV_EXPORTS GpuMat getInputMat(InputArray _src, Stream& stream);\n\n    CV_EXPORTS GpuMat getOutputMat(OutputArray _dst, int rows, int cols, int type, Stream& stream);\n    static inline GpuMat getOutputMat(OutputArray _dst, Size size, int type, Stream& stream)\n    {\n        return getOutputMat(_dst, size.height, size.width, type, stream);\n    }\n\n    CV_EXPORTS void syncOutput(const GpuMat& dst, OutputArray _dst, Stream& stream);\n}}\n\n#ifndef HAVE_CUDA\n\nstatic inline void throw_no_cuda() { CV_Error(cv::Error::GpuNotSupported, \"The library is compiled without CUDA support\"); }\n\n#else // HAVE_CUDA\n\nstatic inline void throw_no_cuda() { CV_Error(cv::Error::StsNotImplemented, \"The called functionality is disabled for current build or platform\"); }\n\nnamespace cv { namespace cuda\n{\n    class CV_EXPORTS BufferPool\n    {\n    public:\n        explicit BufferPool(Stream& stream);\n\n        GpuMat getBuffer(int rows, int cols, int type);\n        GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); }\n\n        GpuMat::Allocator* getAllocator() const { return allocator_; }\n\n    private:\n        GpuMat::Allocator* allocator_;\n    };\n\n    static inline void checkNppError(int code, const char* file, const int line, const char* func)\n    {\n        if (code < 0)\n            cv::error(cv::Error::GpuApiCallError, getNppErrorMessage(code), func, file, line);\n    }\n\n    static inline void checkCudaDriverApiError(int code, const char* file, const int line, const char* func)\n    {\n        if (code != CUDA_SUCCESS)\n            cv::error(cv::Error::GpuApiCallError, getCudaDriverApiErrorMessage(code), func, file, line);\n    }\n\n    template<int n> struct NPPTypeTraits;\n    template<> struct NPPTypeTraits<CV_8U>  { typedef Npp8u npp_type; };\n    template<> struct NPPTypeTraits<CV_8S>  { typedef Npp8s npp_type; };\n    template<> struct NPPTypeTraits<CV_16U> { typedef Npp16u npp_type; };\n    template<> struct NPPTypeTraits<CV_16S> { typedef Npp16s npp_type; };\n    template<> struct NPPTypeTraits<CV_32S> { typedef Npp32s npp_type; };\n    template<> struct NPPTypeTraits<CV_32F> { typedef Npp32f npp_type; };\n    template<> struct NPPTypeTraits<CV_64F> { typedef Npp64f npp_type; };\n\n    class NppStreamHandler\n    {\n    public:\n        inline explicit NppStreamHandler(Stream& newStream)\n        {\n            oldStream = nppGetStream();\n            nppSetStream(StreamAccessor::getStream(newStream));\n        }\n\n        inline explicit NppStreamHandler(cudaStream_t newStream)\n        {\n            oldStream = nppGetStream();\n            nppSetStream(newStream);\n        }\n\n        inline ~NppStreamHandler()\n        {\n            nppSetStream(oldStream);\n        }\n\n    private:\n        cudaStream_t oldStream;\n    };\n}}\n\n#define nppSafeCall(expr)  cv::cuda::checkNppError(expr, __FILE__, __LINE__, CV_Func)\n#define cuSafeCall(expr)  cv::cuda::checkCudaDriverApiError(expr, __FILE__, __LINE__, CV_Func)\n\n#endif // HAVE_CUDA\n\n//! @endcond\n\n#endif // __OPENCV_CORE_CUDA_PRIVATE_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/private.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_PRIVATE_HPP__\n#define __OPENCV_CORE_PRIVATE_HPP__\n\n#ifndef __OPENCV_BUILD\n#  error this is a private header which should not be used from outside of the OpenCV library\n#endif\n\n#include \"opencv2/core.hpp\"\n#include \"cvconfig.h\"\n\n#ifdef HAVE_EIGEN\n#  if defined __GNUC__ && defined __APPLE__\n#    pragma GCC diagnostic ignored \"-Wshadow\"\n#  endif\n#  include <Eigen/Core>\n#  include \"opencv2/core/eigen.hpp\"\n#endif\n\n#ifdef HAVE_TBB\n#  include \"tbb/tbb_stddef.h\"\n#  if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202\n#    include \"tbb/tbb.h\"\n#    include \"tbb/task.h\"\n#    undef min\n#    undef max\n#  else\n#    undef HAVE_TBB\n#  endif\n#endif\n\n//! 
@cond IGNORED\n\nnamespace cv\n{\n#ifdef HAVE_TBB\n\n    typedef tbb::blocked_range<int> BlockedRange;\n\n    template<typename Body> static inline\n    void parallel_for( const BlockedRange& range, const Body& body )\n    {\n        tbb::parallel_for(range, body);\n    }\n\n    typedef tbb::split Split;\n\n    template<typename Body> static inline\n    void parallel_reduce( const BlockedRange& range, Body& body )\n    {\n        tbb::parallel_reduce(range, body);\n    }\n\n    typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;\n#else\n    class BlockedRange\n    {\n    public:\n        BlockedRange() : _begin(0), _end(0), _grainsize(0) {}\n        BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}\n        int begin() const { return _begin; }\n        int end() const { return _end; }\n        int grainsize() const { return _grainsize; }\n\n    protected:\n        int _begin, _end, _grainsize;\n    };\n\n    template<typename Body> static inline\n    void parallel_for( const BlockedRange& range, const Body& body )\n    {\n        body(range);\n    }\n    typedef std::vector<Rect> ConcurrentRectVector;\n\n    class Split {};\n\n    template<typename Body> static inline\n    void parallel_reduce( const BlockedRange& range, Body& body )\n    {\n        body(range);\n    }\n#endif\n\n    // Returns a static string if there is a parallel framework,\n    // NULL otherwise.\n    CV_EXPORTS const char* currentParallelFramework();\n} //namespace cv\n\n/****************************************************************************************\\\n*                                  Common declarations                                   *\n\\****************************************************************************************/\n\n/* the alignment of all the allocated buffers */\n#define  CV_MALLOC_ALIGN    16\n\n/* IEEE754 constants and macros */\n#define  CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0))\n#define  CV_TOGGLE_DBL(x) ((x)^((int64)(x) < 0 ? 
CV_BIG_INT(0x7fffffffffffffff) : 0))\n\nstatic inline void* cvAlignPtr( const void* ptr, int align = 32 )\n{\n    CV_DbgAssert ( (align & (align-1)) == 0 );\n    return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) );\n}\n\nstatic inline int cvAlign( int size, int align )\n{\n    CV_DbgAssert( (align & (align-1)) == 0 && size < INT_MAX );\n    return (size + align - 1) & -align;\n}\n\n#ifdef IPL_DEPTH_8U\nstatic inline cv::Size cvGetMatSize( const CvMat* mat )\n{\n    return cv::Size(mat->cols, mat->rows);\n}\n#endif\n\nnamespace cv\n{\nCV_EXPORTS void scalarToRawData(const cv::Scalar& s, void* buf, int type, int unroll_to = 0);\n}\n\n// property implementation macros\n\n#define CV_IMPL_PROPERTY_RO(type, name, member) \\\n    inline type get##name() const { return member; }\n\n#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \\\n    CV_IMPL_PROPERTY_RO(r_type, name, member) \\\n    inline void set##name(w_type val) { member = val; }\n\n#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \\\n    r_type get##name() const { return internal_obj.get##internal_name(); } \\\n    void set##name(w_type val) { internal_obj.set##internal_name(val); }\n\n#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)\n#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)\n\n#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj)  CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)\n#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)\n\n#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)\n#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)\n\n/****************************************************************************************\\\n*                     Structures and macros for integration with IPP                     *\n\\****************************************************************************************/\n\n#ifdef HAVE_IPP\n#include \"ipp.h\"\n\n#ifndef IPP_VERSION_UPDATE // prior to 7.1\n#define IPP_VERSION_UPDATE 0\n#endif\n\n#define IPP_VERSION_X100 (IPP_VERSION_MAJOR * 100 + IPP_VERSION_MINOR*10 + IPP_VERSION_UPDATE)\n\n// General define for ipp function disabling\n#define IPP_DISABLE_BLOCK 0\n\n#ifdef CV_MALLOC_ALIGN\n#undef CV_MALLOC_ALIGN\n#endif\n#define CV_MALLOC_ALIGN 32 // required for AVX optimization\n\n#define setIppErrorStatus() cv::ipp::setIppStatus(-1, CV_Func, __FILE__, __LINE__)\n\nstatic inline IppiSize ippiSize(int width, int height)\n{\n    IppiSize size = { width, height };\n    return size;\n}\n\nstatic inline IppiSize ippiSize(const cv::Size & _size)\n{\n    IppiSize size = { _size.width, _size.height };\n    return size;\n}\n\nstatic inline IppiBorderType ippiGetBorderType(int borderTypeNI)\n{\n    return borderTypeNI == cv::BORDER_CONSTANT ? ippBorderConst :\n        borderTypeNI == cv::BORDER_WRAP ? ippBorderWrap :\n        borderTypeNI == cv::BORDER_REPLICATE ? ippBorderRepl :\n        borderTypeNI == cv::BORDER_REFLECT_101 ? ippBorderMirror :\n        borderTypeNI == cv::BORDER_REFLECT ? ippBorderMirrorR : (IppiBorderType)-1;\n}\n\nstatic inline IppDataType ippiGetDataType(int depth)\n{\n    return depth == CV_8U ? ipp8u :\n        depth == CV_8S ? ipp8s :\n        depth == CV_16U ? 
ipp16u :\n        depth == CV_16S ? ipp16s :\n        depth == CV_32S ? ipp32s :\n        depth == CV_32F ? ipp32f :\n        depth == CV_64F ? ipp64f : (IppDataType)-1;\n}\n\n// IPP temporary buffer helper\ntemplate<typename T>\nclass IppAutoBuffer\n{\npublic:\n    IppAutoBuffer() { m_pBuffer = NULL; }\n    IppAutoBuffer(int size) { Alloc(size); }\n    ~IppAutoBuffer() { Release(); }\n    T* Alloc(int size) { m_pBuffer = (T*)ippMalloc(size); return m_pBuffer; }\n    void Release() { if(m_pBuffer) ippFree(m_pBuffer); m_pBuffer = NULL; }\n    inline operator T* () { return (T*)m_pBuffer;}\n    inline operator const T* () const { return (const T*)m_pBuffer;}\nprivate:\n    // Disable copy operations\n    IppAutoBuffer(IppAutoBuffer &) {};\n    IppAutoBuffer& operator =(const IppAutoBuffer &) {return *this;};\n\n    T* m_pBuffer;\n};\n\n#else\n#define IPP_VERSION_X100 0\n#endif\n\n// There should be no API difference in OpenCV between ICV and IPP since 9.0\n#if (defined HAVE_IPP_ICV_ONLY) && IPP_VERSION_X100 >= 900\n#undef HAVE_IPP_ICV_ONLY\n#endif\n\n#ifdef HAVE_IPP_ICV_ONLY\n#define HAVE_ICV 1\n#else\n#define HAVE_ICV 0\n#endif\n\n#if defined HAVE_IPP\n#if IPP_VERSION_X100 >= 900\n#define IPP_INITIALIZER(FEAT)                           \\\n{                                                       \\\n    if(FEAT)                                            \\\n        ippSetCpuFeatures(FEAT);                        \\\n    else                                                \\\n        ippInit();                                      \\\n}\n#elif IPP_VERSION_X100 >= 800\n#define IPP_INITIALIZER(FEAT)                           \\\n{                                                       \\\n    ippInit();                                          \\\n}\n#else\n#define IPP_INITIALIZER(FEAT)                           \\\n{                                                       \\\n    ippStaticInit();                                    \\\n}\n#endif\n\n#ifdef CVAPI_EXPORTS\n#define IPP_INITIALIZER_AUTO                            \\\nstruct __IppInitializer__                               \\\n{                                                       \\\n    __IppInitializer__()                                \\\n    {IPP_INITIALIZER(cv::ipp::getIppFeatures())}        \\\n};                                                      \\\nstatic struct __IppInitializer__ __ipp_initializer__;\n#else\n#define IPP_INITIALIZER_AUTO\n#endif\n#else\n#define IPP_INITIALIZER\n#define IPP_INITIALIZER_AUTO\n#endif\n\n#define CV_IPP_CHECK_COND (cv::ipp::useIPP())\n#define CV_IPP_CHECK() if(CV_IPP_CHECK_COND)\n\n#ifdef HAVE_IPP\n\n#ifdef CV_IPP_RUN_VERBOSE\n#define CV_IPP_RUN_(condition, func, ...)                                   
\\\n    {                                                                       \\\n        if (cv::ipp::useIPP() && (condition) && func)                       \\\n        {                                                                   \\\n            printf(\"%s: IPP implementation is running\\n\", CV_Func);         \\\n            fflush(stdout);                                                 \\\n            CV_IMPL_ADD(CV_IMPL_IPP);                                       \\\n            return __VA_ARGS__;                                             \\\n        }                                                                   \\\n        else                                                                \\\n        {                                                                   \\\n            printf(\"%s: Plain implementation is running\\n\", CV_Func);       \\\n            fflush(stdout);                                                 \\\n        }                                                                   \\\n    }\n#elif defined CV_IPP_RUN_ASSERT\n#define CV_IPP_RUN_(condition, func, ...)                                   \\\n    {                                                                       \\\n        if (cv::ipp::useIPP() && (condition))                               \\\n        {                                                                   \\\n            if(func)                                                        \\\n            {                                                               \\\n                CV_IMPL_ADD(CV_IMPL_IPP);                                   \\\n            }                                                               \\\n            else                                                            \\\n            {                                                               \\\n                setIppErrorStatus();                                        \\\n                CV_Error(cv::Error::StsAssert, #func);                      \\\n            }                                                               \\\n            return __VA_ARGS__;                                             \\\n        }                                                                   \\\n    }\n#else\n#define CV_IPP_RUN_(condition, func, ...)                                   \\\n    if (cv::ipp::useIPP() && (condition) && func)                           \\\n    {                                                                       \\\n        CV_IMPL_ADD(CV_IMPL_IPP);                                           \\\n        return __VA_ARGS__;                                                 \\\n    }\n#endif\n\n#else\n#define CV_IPP_RUN_(condition, func, ...)\n#endif\n\n#define CV_IPP_RUN(condition, func, ...) 
CV_IPP_RUN_(condition, func, __VA_ARGS__)\n\n\n#ifndef IPPI_CALL\n#  define IPPI_CALL(func) CV_Assert((func) >= 0)\n#endif\n\n/* IPP-compatible return codes */\ntypedef enum CvStatus\n{\n    CV_BADMEMBLOCK_ERR          = -113,\n    CV_INPLACE_NOT_SUPPORTED_ERR= -112,\n    CV_UNMATCHED_ROI_ERR        = -111,\n    CV_NOTFOUND_ERR             = -110,\n    CV_BADCONVERGENCE_ERR       = -109,\n\n    CV_BADDEPTH_ERR             = -107,\n    CV_BADROI_ERR               = -106,\n    CV_BADHEADER_ERR            = -105,\n    CV_UNMATCHED_FORMATS_ERR    = -104,\n    CV_UNSUPPORTED_COI_ERR      = -103,\n    CV_UNSUPPORTED_CHANNELS_ERR = -102,\n    CV_UNSUPPORTED_DEPTH_ERR    = -101,\n    CV_UNSUPPORTED_FORMAT_ERR   = -100,\n\n    CV_BADARG_ERR               = -49,  //ipp comp\n    CV_NOTDEFINED_ERR           = -48,  //ipp comp\n\n    CV_BADCHANNELS_ERR          = -47,  //ipp comp\n    CV_BADRANGE_ERR             = -44,  //ipp comp\n    CV_BADSTEP_ERR              = -29,  //ipp comp\n\n    CV_BADFLAG_ERR              =  -12,\n    CV_DIV_BY_ZERO_ERR          =  -11, //ipp comp\n    CV_BADCOEF_ERR              =  -10,\n\n    CV_BADFACTOR_ERR            =  -7,\n    CV_BADPOINT_ERR             =  -6,\n    CV_BADSCALE_ERR             =  -4,\n    CV_OUTOFMEM_ERR             =  -3,\n    CV_NULLPTR_ERR              =  -2,\n    CV_BADSIZE_ERR              =  -1,\n    CV_NO_ERR                   =   0,\n    CV_OK                       =   CV_NO_ERR\n}\nCvStatus;\n\n#ifdef HAVE_TEGRA_OPTIMIZATION\nnamespace tegra {\n\nCV_EXPORTS bool useTegra();\nCV_EXPORTS void setUseTegra(bool flag);\n\n}\n#endif\n\n//! @endcond\n\n#endif // __OPENCV_CORE_PRIVATE_HPP__\n"
  },
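  {
    "path": "docs/examples/parallel_shim_sketch.cpp",
    "content": "// Editor's note: illustrative sketch only, NOT part of the original sources,\n// and this file path is hypothetical. core/private.hpp refuses to compile\n// unless __OPENCV_BUILD is defined, so this sketch assumes it is built as part\n// of an OpenCV module. It exercises the BlockedRange/parallel_for shim above:\n// with HAVE_TBB the call forwards to tbb::parallel_for, otherwise the functor\n// is simply invoked once over the whole range.\n\n#include \"opencv2/core/private.hpp\"\n\n// Functor in the shape both backends expect: copyable, with a const\n// operator()(BlockedRange) that touches only its own slice of indices.\nstruct ScaleBody\n{\n    float* data;\n    float  scale;\n\n    void operator()(const cv::BlockedRange& range) const\n    {\n        for (int i = range.begin(); i < range.end(); ++i)\n            data[i] *= scale; // disjoint indices: race-free under either backend\n    }\n};\n\nstatic void scaleAll(float* data, int n, float scale)\n{\n    ScaleBody body;\n    body.data  = data;\n    body.scale = scale;\n    cv::parallel_for(cv::BlockedRange(0, n), body);\n}\n"
  },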
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/ptr.inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, NVIDIA Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the copyright holders or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_PTR_INL_HPP__\n#define __OPENCV_CORE_PTR_INL_HPP__\n\n#include <algorithm>\n\n//! @cond IGNORED\n\nnamespace cv {\n\ntemplate<typename Y>\nvoid DefaultDeleter<Y>::operator () (Y* p) const\n{\n    delete p;\n}\n\nnamespace detail\n{\n\nstruct PtrOwner\n{\n    PtrOwner() : refCount(1)\n    {}\n\n    void incRef()\n    {\n        CV_XADD(&refCount, 1);\n    }\n\n    void decRef()\n    {\n        if (CV_XADD(&refCount, -1) == 1) deleteSelf();\n    }\n\nprotected:\n    /* This doesn't really need to be virtual, since PtrOwner is never deleted\n       directly, but it doesn't hurt and it helps avoid warnings. */\n    virtual ~PtrOwner()\n    {}\n\n    virtual void deleteSelf() = 0;\n\nprivate:\n    unsigned int refCount;\n\n    // noncopyable\n    PtrOwner(const PtrOwner&);\n    PtrOwner& operator = (const PtrOwner&);\n};\n\ntemplate<typename Y, typename D>\nstruct PtrOwnerImpl : PtrOwner\n{\n    PtrOwnerImpl(Y* p, D d) : owned(p), deleter(d)\n    {}\n\n    void deleteSelf()\n    {\n        deleter(owned);\n        delete this;\n    }\n\nprivate:\n    Y* owned;\n    D deleter;\n};\n\n\n}\n\ntemplate<typename T>\nPtr<T>::Ptr() : owner(NULL), stored(NULL)\n{}\n\ntemplate<typename T>\ntemplate<typename Y>\nPtr<T>::Ptr(Y* p)\n  : owner(p\n      ? 
new detail::PtrOwnerImpl<Y, DefaultDeleter<Y> >(p, DefaultDeleter<Y>())\n      : NULL),\n    stored(p)\n{}\n\ntemplate<typename T>\ntemplate<typename Y, typename D>\nPtr<T>::Ptr(Y* p, D d)\n  : owner(p\n      ? new detail::PtrOwnerImpl<Y, D>(p, d)\n      : NULL),\n    stored(p)\n{}\n\ntemplate<typename T>\nPtr<T>::Ptr(const Ptr& o) : owner(o.owner), stored(o.stored)\n{\n    if (owner) owner->incRef();\n}\n\ntemplate<typename T>\ntemplate<typename Y>\nPtr<T>::Ptr(const Ptr<Y>& o) : owner(o.owner), stored(o.stored)\n{\n    if (owner) owner->incRef();\n}\n\ntemplate<typename T>\ntemplate<typename Y>\nPtr<T>::Ptr(const Ptr<Y>& o, T* p) : owner(o.owner), stored(p)\n{\n    if (owner) owner->incRef();\n}\n\ntemplate<typename T>\nPtr<T>::~Ptr()\n{\n    release();\n}\n\ntemplate<typename T>\nPtr<T>& Ptr<T>::operator = (const Ptr<T>& o)\n{\n    Ptr(o).swap(*this);\n    return *this;\n}\n\ntemplate<typename T>\ntemplate<typename Y>\nPtr<T>& Ptr<T>::operator = (const Ptr<Y>& o)\n{\n    Ptr(o).swap(*this);\n    return *this;\n}\n\ntemplate<typename T>\nvoid Ptr<T>::release()\n{\n    if (owner) owner->decRef();\n    owner = NULL;\n    stored = NULL;\n}\n\ntemplate<typename T>\ntemplate<typename Y>\nvoid Ptr<T>::reset(Y* p)\n{\n    Ptr(p).swap(*this);\n}\n\ntemplate<typename T>\ntemplate<typename Y, typename D>\nvoid Ptr<T>::reset(Y* p, D d)\n{\n    Ptr(p, d).swap(*this);\n}\n\ntemplate<typename T>\nvoid Ptr<T>::swap(Ptr<T>& o)\n{\n    std::swap(owner, o.owner);\n    std::swap(stored, o.stored);\n}\n\ntemplate<typename T>\nT* Ptr<T>::get() const\n{\n    return stored;\n}\n\ntemplate<typename T>\ntypename detail::RefOrVoid<T>::type Ptr<T>::operator * () const\n{\n    return *stored;\n}\n\ntemplate<typename T>\nT* Ptr<T>::operator -> () const\n{\n    return stored;\n}\n\ntemplate<typename T>\nPtr<T>::operator T* () const\n{\n    return stored;\n}\n\n\ntemplate<typename T>\nbool Ptr<T>::empty() const\n{\n    return !stored;\n}\n\ntemplate<typename T>\ntemplate<typename Y>\nPtr<Y> Ptr<T>::staticCast() const\n{\n    return Ptr<Y>(*this, static_cast<Y*>(stored));\n}\n\ntemplate<typename T>\ntemplate<typename Y>\nPtr<Y> Ptr<T>::constCast() const\n{\n    return Ptr<Y>(*this, const_cast<Y*>(stored));\n}\n\ntemplate<typename T>\ntemplate<typename Y>\nPtr<Y> Ptr<T>::dynamicCast() const\n{\n    return Ptr<Y>(*this, dynamic_cast<Y*>(stored));\n}\n\n#ifdef CV_CXX_MOVE_SEMANTICS\n\ntemplate<typename T>\nPtr<T>::Ptr(Ptr&& o) : owner(o.owner), stored(o.stored)\n{\n    o.owner = NULL;\n    o.stored = NULL;\n}\n\ntemplate<typename T>\nPtr<T>& Ptr<T>::operator = (Ptr<T>&& o)\n{\n    release();\n    owner = o.owner;\n    stored = o.stored;\n    o.owner = NULL;\n    o.stored = NULL;\n    return *this;\n}\n\n#endif\n\n\ntemplate<typename T>\nvoid swap(Ptr<T>& ptr1, Ptr<T>& ptr2){\n    ptr1.swap(ptr2);\n}\n\ntemplate<typename T>\nbool operator == (const Ptr<T>& ptr1, const Ptr<T>& ptr2)\n{\n    return ptr1.get() == ptr2.get();\n}\n\ntemplate<typename T>\nbool operator != (const Ptr<T>& ptr1, const Ptr<T>& ptr2)\n{\n    return ptr1.get() != ptr2.get();\n}\n\ntemplate<typename T>\nPtr<T> makePtr()\n{\n    return Ptr<T>(new T());\n}\n\ntemplate<typename T, typename A1>\nPtr<T> makePtr(const A1& a1)\n{\n    return Ptr<T>(new T(a1));\n}\n\ntemplate<typename T, typename A1, typename A2>\nPtr<T> makePtr(const A1& a1, const A2& a2)\n{\n    return Ptr<T>(new T(a1, a2));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3)\n{\n    return Ptr<T>(new T(a1, a2, 
a3));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4)\n{\n    return Ptr<T>(new T(a1, a2, a3, a4));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5)\n{\n    return Ptr<T>(new T(a1, a2, a3, a4, a5));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6)\n{\n    return Ptr<T>(new T(a1, a2, a3, a4, a5, a6));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7)\n{\n    return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8)\n{\n    return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9)\n{\n    return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9));\n}\n\ntemplate<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9, typename A10>\nPtr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10)\n{\n    return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10));\n}\n\n} // namespace cv\n\n//! @endcond\n\n#endif // __OPENCV_CORE_PTR_INL_HPP__\n"
  },
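  {
    "path": "docs/examples/ptr_usage_sketch.cpp",
    "content": "// Editor's note: illustrative sketch only, NOT part of the original sources;\n// this file path is hypothetical. It demonstrates the reference counting that\n// ptr.inl.hpp implements above: makePtr() for plain heap objects, and the\n// (pointer, deleter) constructor for resources not released with delete.\n\n#include <cstdio>\n#include <vector>\n\n#include \"opencv2/core.hpp\"\n\nint main()\n{\n    // Shared ownership: makePtr() allocates one detail::PtrOwnerImpl; copying\n    // bumps the reference count, release() drops it.\n    cv::Ptr<std::vector<int> > a = cv::makePtr<std::vector<int> >(10, 42);\n    cv::Ptr<std::vector<int> > b = a; // incRef(): count is now 2\n    a.release();                      // decRef(): b still keeps the vector alive\n    std::printf(\"%d\\n\", (int)b->size()); // prints 10\n\n    // Custom deleter: fclose() runs instead of delete when the count hits zero.\n    // If fopen() fails, the Ptr stays empty and the deleter is never invoked.\n    cv::Ptr<FILE> f(std::fopen(\"data.bin\", \"rb\"), std::fclose);\n    if (!f.empty())\n        std::printf(\"opened data.bin\\n\");\n\n    return 0; // b and f release their objects here\n}\n"
  },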
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/saturate.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2014, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_SATURATE_HPP__\n#define __OPENCV_CORE_SATURATE_HPP__\n\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/fast_math.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup core_utils\n//! @{\n\n/////////////// saturate_cast (used in image & signal processing) ///////////////////\n\n/** @brief Template function for accurate conversion from one primitive type to another.\n\n The functions saturate_cast resemble the standard C++ cast operations, such as static_cast\\<T\\>()\n and others. They perform an efficient and accurate conversion from one primitive type to another\n (see the introduction chapter). saturate in the name means that when the input value v is out of the\n range of the target type, the result is not formed just by taking low bits of the input, but instead\n the value is clipped. For example:\n @code\n uchar a = saturate_cast<uchar>(-100); // a = 0 (UCHAR_MIN)\n short b = saturate_cast<short>(33333.33333); // b = 32767 (SHRT_MAX)\n @endcode\n Such clipping is done when the target type is unsigned char , signed char , unsigned short or\n signed short . 
For 32-bit integers, no clipping is done.\n\n When the parameter is a floating-point value and the target type is an integer (8-, 16- or 32-bit),\n the floating-point value is first rounded to the nearest integer and then clipped if needed (when\n the target type is 8- or 16-bit).\n\n This operation is used in the simplest or most complex image processing functions in OpenCV.\n\n @param v Function parameter.\n @sa add, subtract, multiply, divide, Mat::convertTo\n */\ntemplate<typename _Tp> static inline _Tp saturate_cast(uchar v)    { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(schar v)    { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(ushort v)   { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(short v)    { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(unsigned v) { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(int v)      { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(float v)    { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(double v)   { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(int64 v)    { return _Tp(v); }\n/** @overload */\ntemplate<typename _Tp> static inline _Tp saturate_cast(uint64 v)   { return _Tp(v); }\n\ntemplate<> inline uchar saturate_cast<uchar>(schar v)        { return (uchar)std::max((int)v, 0); }\ntemplate<> inline uchar saturate_cast<uchar>(ushort v)       { return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); }\ntemplate<> inline uchar saturate_cast<uchar>(int v)          { return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }\ntemplate<> inline uchar saturate_cast<uchar>(short v)        { return saturate_cast<uchar>((int)v); }\ntemplate<> inline uchar saturate_cast<uchar>(unsigned v)     { return (uchar)std::min(v, (unsigned)UCHAR_MAX); }\ntemplate<> inline uchar saturate_cast<uchar>(float v)        { int iv = cvRound(v); return saturate_cast<uchar>(iv); }\ntemplate<> inline uchar saturate_cast<uchar>(double v)       { int iv = cvRound(v); return saturate_cast<uchar>(iv); }\ntemplate<> inline uchar saturate_cast<uchar>(int64 v)        { return (uchar)((uint64)v <= (uint64)UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }\ntemplate<> inline uchar saturate_cast<uchar>(uint64 v)       { return (uchar)std::min(v, (uint64)UCHAR_MAX); }\n\ntemplate<> inline schar saturate_cast<schar>(uchar v)        { return (schar)std::min((int)v, SCHAR_MAX); }\ntemplate<> inline schar saturate_cast<schar>(ushort v)       { return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); }\ntemplate<> inline schar saturate_cast<schar>(int v)          { return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ? v : v > 0 ? 
SCHAR_MAX : SCHAR_MIN); }\ntemplate<> inline schar saturate_cast<schar>(short v)        { return saturate_cast<schar>((int)v); }\ntemplate<> inline schar saturate_cast<schar>(unsigned v)     { return (schar)std::min(v, (unsigned)SCHAR_MAX); }\ntemplate<> inline schar saturate_cast<schar>(float v)        { int iv = cvRound(v); return saturate_cast<schar>(iv); }\ntemplate<> inline schar saturate_cast<schar>(double v)       { int iv = cvRound(v); return saturate_cast<schar>(iv); }\ntemplate<> inline schar saturate_cast<schar>(int64 v)        { return (schar)((uint64)((int64)v-SCHAR_MIN) <= (uint64)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN); }\ntemplate<> inline schar saturate_cast<schar>(uint64 v)       { return (schar)std::min(v, (uint64)SCHAR_MAX); }\n\ntemplate<> inline ushort saturate_cast<ushort>(schar v)      { return (ushort)std::max((int)v, 0); }\ntemplate<> inline ushort saturate_cast<ushort>(short v)      { return (ushort)std::max((int)v, 0); }\ntemplate<> inline ushort saturate_cast<ushort>(int v)        { return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }\ntemplate<> inline ushort saturate_cast<ushort>(unsigned v)   { return (ushort)std::min(v, (unsigned)USHRT_MAX); }\ntemplate<> inline ushort saturate_cast<ushort>(float v)      { int iv = cvRound(v); return saturate_cast<ushort>(iv); }\ntemplate<> inline ushort saturate_cast<ushort>(double v)     { int iv = cvRound(v); return saturate_cast<ushort>(iv); }\ntemplate<> inline ushort saturate_cast<ushort>(int64 v)      { return (ushort)((uint64)v <= (uint64)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }\ntemplate<> inline ushort saturate_cast<ushort>(uint64 v)     { return (ushort)std::min(v, (uint64)USHRT_MAX); }\n\ntemplate<> inline short saturate_cast<short>(ushort v)       { return (short)std::min((int)v, SHRT_MAX); }\ntemplate<> inline short saturate_cast<short>(int v)          { return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN); }\ntemplate<> inline short saturate_cast<short>(unsigned v)     { return (short)std::min(v, (unsigned)SHRT_MAX); }\ntemplate<> inline short saturate_cast<short>(float v)        { int iv = cvRound(v); return saturate_cast<short>(iv); }\ntemplate<> inline short saturate_cast<short>(double v)       { int iv = cvRound(v); return saturate_cast<short>(iv); }\ntemplate<> inline short saturate_cast<short>(int64 v)        { return (short)((uint64)((int64)v - SHRT_MIN) <= (uint64)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN); }\ntemplate<> inline short saturate_cast<short>(uint64 v)       { return (short)std::min(v, (uint64)SHRT_MAX); }\n\ntemplate<> inline int saturate_cast<int>(float v)            { return cvRound(v); }\ntemplate<> inline int saturate_cast<int>(double v)           { return cvRound(v); }\n\n// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.\ntemplate<> inline unsigned saturate_cast<unsigned>(float v)  { return cvRound(v); }\ntemplate<> inline unsigned saturate_cast<unsigned>(double v) { return cvRound(v); }\n\n//! @}\n\n} // cv\n\n#endif // __OPENCV_CORE_SATURATE_HPP__\n"
  },
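  {
    "path": "docs/examples/saturate_cast_sketch.cpp",
    "content": "// Editor's note: illustrative sketch only, NOT part of the original sources;\n// this file path is hypothetical. It spells out the clipping and rounding\n// rules documented in saturate.hpp above.\n\n#include <cassert>\n\n#include \"opencv2/core.hpp\"\n\nint main()\n{\n    assert(cv::saturate_cast<uchar>(-100)        == 0);     // clipped to UCHAR_MIN\n    assert(cv::saturate_cast<uchar>(300)         == 255);   // clipped to UCHAR_MAX\n    assert(cv::saturate_cast<short>(33333.33333) == 32767); // rounded, then clipped to SHRT_MAX\n\n    // Floating-point input goes through cvRound() first (round half to even):\n    assert(cv::saturate_cast<uchar>(2.5f) == 2);\n    assert(cv::saturate_cast<uchar>(3.5f) == 4);\n\n    // As the header notes, negative values are deliberately NOT clipped for the\n    // unsigned 32-bit target, so -1 wraps around to 0xffffffff.\n    assert(cv::saturate_cast<unsigned>(-1.0) == 0xffffffffu);\n\n    return 0;\n}\n"
  },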
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/sse_utils.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_SSE_UTILS_HPP__\n#define __OPENCV_CORE_SSE_UTILS_HPP__\n\n#ifndef __cplusplus\n#  error sse_utils.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core/cvdef.h\"\n\n//! @addtogroup core_utils_sse\n//! 
@{\n\n#if CV_SSE2\n\ninline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)\n{\n    __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g0);\n    __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g0);\n    __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_g1);\n    __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_g1);\n\n    __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk2);\n    __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk2);\n    __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk3);\n    __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk3);\n\n    __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk2);\n    __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk2);\n    __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk3);\n    __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk3);\n\n    __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk2);\n    __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk2);\n    __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk3);\n    __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk3);\n\n    v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk2);\n    v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk2);\n    v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk3);\n    v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk3);\n}\n\ninline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,\n                                  __m128i & v_g1, __m128i & v_b0, __m128i & v_b1)\n{\n    __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g1);\n    __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g1);\n    __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b0);\n    __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b0);\n    __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_b1);\n    __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_b1);\n\n    __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk3);\n    __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk3);\n    __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk4);\n    __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk4);\n    __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk5);\n    __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk5);\n\n    __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk3);\n    __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk3);\n    __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk4);\n    __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk4);\n    __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk5);\n    __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk5);\n\n    __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk3);\n    __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk3);\n    __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk4);\n    __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk4);\n    __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk5);\n    __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk5);\n\n    v_r0 = _mm_unpacklo_epi8(layer4_chunk0, 
layer4_chunk3);\n    v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk3);\n    v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk4);\n    v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk4);\n    v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk5);\n    v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5);\n}\n\ninline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,\n                                  __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)\n{\n    __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0);\n    __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0);\n    __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b1);\n    __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b1);\n    __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_a0);\n    __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_a0);\n    __m128i layer1_chunk6 = _mm_unpacklo_epi8(v_g1, v_a1);\n    __m128i layer1_chunk7 = _mm_unpackhi_epi8(v_g1, v_a1);\n\n    __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk4);\n    __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk4);\n    __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk5);\n    __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk5);\n    __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk6);\n    __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk6);\n    __m128i layer2_chunk6 = _mm_unpacklo_epi8(layer1_chunk3, layer1_chunk7);\n    __m128i layer2_chunk7 = _mm_unpackhi_epi8(layer1_chunk3, layer1_chunk7);\n\n    __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk4);\n    __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk4);\n    __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk5);\n    __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk5);\n    __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk6);\n    __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk6);\n    __m128i layer3_chunk6 = _mm_unpacklo_epi8(layer2_chunk3, layer2_chunk7);\n    __m128i layer3_chunk7 = _mm_unpackhi_epi8(layer2_chunk3, layer2_chunk7);\n\n    __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk4);\n    __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk4);\n    __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk5);\n    __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk5);\n    __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk6);\n    __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk6);\n    __m128i layer4_chunk6 = _mm_unpacklo_epi8(layer3_chunk3, layer3_chunk7);\n    __m128i layer4_chunk7 = _mm_unpackhi_epi8(layer3_chunk3, layer3_chunk7);\n\n    v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk4);\n    v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk4);\n    v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk5);\n    v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk5);\n    v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk6);\n    v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk6);\n    v_a0 = _mm_unpacklo_epi8(layer4_chunk3, layer4_chunk7);\n    v_a1 = _mm_unpackhi_epi8(layer4_chunk3, layer4_chunk7);\n}\n\ninline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)\n{\n    __m128i v_mask = _mm_set1_epi16(0x00ff);\n\n    __m128i layer4_chunk0 
= _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));\n    __m128i layer4_chunk2 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));\n    __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));\n    __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8));\n\n    __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask));\n    __m128i layer3_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8));\n    __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask));\n    __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8));\n\n    __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));\n    __m128i layer2_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8));\n    __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));\n    __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8));\n\n    __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));\n    __m128i layer1_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8));\n    __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));\n    __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8));\n\n    v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));\n    v_g0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8));\n    v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));\n    v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8));\n}\n\ninline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,\n                                __m128i & v_g1, __m128i & v_b0, __m128i & v_b1)\n{\n    __m128i v_mask = _mm_set1_epi16(0x00ff);\n\n    __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));\n    __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));\n    __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));\n    __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8));\n    __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));\n    __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8));\n\n    __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask));\n    __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8));\n    __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask));\n    __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8));\n    __m128i layer3_chunk2 = 
_mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask));\n    __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8));\n\n    __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));\n    __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8));\n    __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));\n    __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8));\n    __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));\n    __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8));\n\n    __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));\n    __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8));\n    __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));\n    __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8));\n    __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));\n    __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8));\n\n    v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));\n    v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8));\n    v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));\n    v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8));\n    v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));\n    v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8));\n}\n\ninline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,\n                                __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)\n{\n    __m128i v_mask = _mm_set1_epi16(0x00ff);\n\n    __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));\n    __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));\n    __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));\n    __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8));\n    __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));\n    __m128i layer4_chunk6 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8));\n    __m128i layer4_chunk3 = _mm_packus_epi16(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask));\n    __m128i layer4_chunk7 = _mm_packus_epi16(_mm_srli_epi16(v_a0, 8), _mm_srli_epi16(v_a1, 8));\n\n    __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask));\n    __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8));\n 
   __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask));\n    __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8));\n    __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask));\n    __m128i layer3_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8));\n    __m128i layer3_chunk3 = _mm_packus_epi16(_mm_and_si128(layer4_chunk6, v_mask), _mm_and_si128(layer4_chunk7, v_mask));\n    __m128i layer3_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk6, 8), _mm_srli_epi16(layer4_chunk7, 8));\n\n    __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));\n    __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8));\n    __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));\n    __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8));\n    __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));\n    __m128i layer2_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8));\n    __m128i layer2_chunk3 = _mm_packus_epi16(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask));\n    __m128i layer2_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk6, 8), _mm_srli_epi16(layer3_chunk7, 8));\n\n    __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));\n    __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8));\n    __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));\n    __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8));\n    __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));\n    __m128i layer1_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8));\n    __m128i layer1_chunk3 = _mm_packus_epi16(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask));\n    __m128i layer1_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk6, 8), _mm_srli_epi16(layer2_chunk7, 8));\n\n    v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));\n    v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8));\n    v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));\n    v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8));\n    v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));\n    v_a0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8));\n    v_g1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask));\n    v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8));\n}\n\ninline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)\n{\n    
__m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g0);\n    __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g0);\n    __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_g1);\n    __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_g1);\n\n    __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk2);\n    __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk2);\n    __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk3);\n    __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk3);\n\n    __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk2);\n    __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk2);\n    __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk3);\n    __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk3);\n\n    v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk2);\n    v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk2);\n    v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk3);\n    v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk3);\n}\n\ninline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,\n                                   __m128i & v_g1, __m128i & v_b0, __m128i & v_b1)\n{\n    __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1);\n    __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1);\n    __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b0);\n    __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b0);\n    __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_b1);\n    __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_b1);\n\n    __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk3);\n    __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk3);\n    __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk4);\n    __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk4);\n    __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk5);\n    __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk5);\n\n    __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk3);\n    __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk3);\n    __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk4);\n    __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk4);\n    __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk5);\n    __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk5);\n\n    v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk3);\n    v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk3);\n    v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk4);\n    v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk4);\n    v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk5);\n    v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5);\n}\n\ninline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,\n                                   __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)\n{\n    __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0);\n    __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0);\n    __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b1);\n    __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b1);\n    __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_a0);\n    __m128i layer1_chunk5 = 
_mm_unpackhi_epi16(v_g0, v_a0);\n    __m128i layer1_chunk6 = _mm_unpacklo_epi16(v_g1, v_a1);\n    __m128i layer1_chunk7 = _mm_unpackhi_epi16(v_g1, v_a1);\n\n    __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk4);\n    __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk4);\n    __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk5);\n    __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk5);\n    __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk6);\n    __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk6);\n    __m128i layer2_chunk6 = _mm_unpacklo_epi16(layer1_chunk3, layer1_chunk7);\n    __m128i layer2_chunk7 = _mm_unpackhi_epi16(layer1_chunk3, layer1_chunk7);\n\n    __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk4);\n    __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk4);\n    __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk5);\n    __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk5);\n    __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk6);\n    __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk6);\n    __m128i layer3_chunk6 = _mm_unpacklo_epi16(layer2_chunk3, layer2_chunk7);\n    __m128i layer3_chunk7 = _mm_unpackhi_epi16(layer2_chunk3, layer2_chunk7);\n\n    v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk4);\n    v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk4);\n    v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk5);\n    v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk5);\n    v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk6);\n    v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk6);\n    v_a0 = _mm_unpacklo_epi16(layer3_chunk3, layer3_chunk7);\n    v_a1 = _mm_unpackhi_epi16(layer3_chunk3, layer3_chunk7);\n}\n\n#if CV_SSE4_1\n\ninline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)\n{\n    __m128i v_mask = _mm_set1_epi32(0x0000ffff);\n\n    __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));\n    __m128i layer3_chunk2 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16));\n    __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));\n    __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16));\n\n    __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));\n    __m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));\n    __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));\n    __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));\n\n    __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));\n    __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));\n    __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));\n    __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));\n\n    v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), 
_mm_and_si128(layer1_chunk1, v_mask));\n    v_g0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));\n    v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));\n    v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16));\n}\n\ninline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,\n                                 __m128i & v_g1, __m128i & v_b0, __m128i & v_b1)\n{\n    __m128i v_mask = _mm_set1_epi32(0x0000ffff);\n\n    __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));\n    __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16));\n    __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));\n    __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16));\n    __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));\n    __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16));\n\n    __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));\n    __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));\n    __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));\n    __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));\n    __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));\n    __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));\n\n    __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));\n    __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));\n    __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));\n    __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));\n    __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));\n    __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));\n\n    v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));\n    v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));\n    v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));\n    v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16));\n    v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));\n    v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16));\n}\n\ninline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,\n                                 __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)\n{\n    __m128i v_mask = _mm_set1_epi32(0x0000ffff);\n\n    __m128i layer3_chunk0 = 
_mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));\n    __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16));\n    __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));\n    __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16));\n    __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));\n    __m128i layer3_chunk6 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16));\n    __m128i layer3_chunk3 = _mm_packus_epi32(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask));\n    __m128i layer3_chunk7 = _mm_packus_epi32(_mm_srli_epi32(v_a0, 16), _mm_srli_epi32(v_a1, 16));\n\n    __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));\n    __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));\n    __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));\n    __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));\n    __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));\n    __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));\n    __m128i layer2_chunk3 = _mm_packus_epi32(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask));\n    __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16));\n\n    __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));\n    __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));\n    __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));\n    __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));\n    __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));\n    __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));\n    __m128i layer1_chunk3 = _mm_packus_epi32(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask));\n    __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16));\n\n    v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));\n    v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));\n    v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));\n    v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16));\n    v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));\n    v_a0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16));\n    v_g1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask));\n    v_a1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk6, 16), _mm_srli_epi32(layer1_chunk7, 16));\n}\n\n#endif // 
CV_SSE4_1\n\ninline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1)\n{\n    __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g0);\n    __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g0);\n    __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_g1);\n    __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_g1);\n\n    __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk2);\n    __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk2);\n    __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk3);\n    __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk3);\n\n    v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk2);\n    v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk2);\n    v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk3);\n    v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk3);\n}\n\ninline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0,\n                                __m128 & v_g1, __m128 & v_b0, __m128 & v_b1)\n{\n    __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1);\n    __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1);\n    __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b0);\n    __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b0);\n    __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_b1);\n    __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_b1);\n\n    __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3);\n    __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3);\n    __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4);\n    __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk4);\n    __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk5);\n    __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk5);\n\n    v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk3);\n    v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk3);\n    v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk4);\n    v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk4);\n    v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk5);\n    v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5);\n}\n\ninline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1,\n                                __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1)\n{\n    __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0);\n    __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0);\n    __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b1);\n    __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b1);\n    __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_a0);\n    __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_a0);\n    __m128 layer1_chunk6 = _mm_unpacklo_ps(v_g1, v_a1);\n    __m128 layer1_chunk7 = _mm_unpackhi_ps(v_g1, v_a1);\n\n    __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk4);\n    __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk4);\n    __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk5);\n    __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk5);\n    __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk6);\n    __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk6);\n    __m128 layer2_chunk6 = _mm_unpacklo_ps(layer1_chunk3, layer1_chunk7);\n    __m128 layer2_chunk7 = _mm_unpackhi_ps(layer1_chunk3, layer1_chunk7);\n\n    v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk4);\n    v_r1 = _mm_unpackhi_ps(layer2_chunk0, 
layer2_chunk4);\n    v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk5);\n    v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk5);\n    v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk6);\n    v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk6);\n    v_a0 = _mm_unpacklo_ps(layer2_chunk3, layer2_chunk7);\n    v_a1 = _mm_unpackhi_ps(layer2_chunk3, layer2_chunk7);\n}\n\ninline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1)\n{\n    const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1);\n\n    __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo);\n    __m128 layer2_chunk2 = _mm_shuffle_ps(v_r0, v_r1, mask_hi);\n    __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo);\n    __m128 layer2_chunk3 = _mm_shuffle_ps(v_g0, v_g1, mask_hi);\n\n    __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo);\n    __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi);\n    __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo);\n    __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi);\n\n    v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo);\n    v_g0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi);\n    v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo);\n    v_g1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi);\n}\n\ninline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0,\n                              __m128 & v_g1, __m128 & v_b0, __m128 & v_b1)\n{\n    const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1);\n\n    __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo);\n    __m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi);\n    __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo);\n    __m128 layer2_chunk4 = _mm_shuffle_ps(v_g0, v_g1, mask_hi);\n    __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo);\n    __m128 layer2_chunk5 = _mm_shuffle_ps(v_b0, v_b1, mask_hi);\n\n    __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo);\n    __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi);\n    __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo);\n    __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi);\n    __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo);\n    __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi);\n\n    v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo);\n    v_g1 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi);\n    v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo);\n    v_b0 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi);\n    v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo);\n    v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi);\n}\n\ninline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1,\n                              __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1)\n{\n    const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1);\n\n    __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo);\n    __m128 layer2_chunk4 = _mm_shuffle_ps(v_r0, v_r1, mask_hi);\n    __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo);\n    __m128 layer2_chunk5 = _mm_shuffle_ps(v_g0, v_g1, mask_hi);\n    __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, 
mask_lo);\n    __m128 layer2_chunk6 = _mm_shuffle_ps(v_b0, v_b1, mask_hi);\n    __m128 layer2_chunk3 = _mm_shuffle_ps(v_a0, v_a1, mask_lo);\n    __m128 layer2_chunk7 = _mm_shuffle_ps(v_a0, v_a1, mask_hi);\n\n    __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo);\n    __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi);\n    __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo);\n    __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi);\n    __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo);\n    __m128 layer1_chunk6 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi);\n    __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_lo);\n    __m128 layer1_chunk7 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_hi);\n\n    v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo);\n    v_b0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi);\n    v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo);\n    v_b1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi);\n    v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo);\n    v_a0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi);\n    v_g1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_lo);\n    v_a1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_hi);\n}\n\n#endif // CV_SSE2\n\n//! @}\n\n#endif //__OPENCV_CORE_SSE_UTILS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/traits.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_TRAITS_HPP__\n#define __OPENCV_CORE_TRAITS_HPP__\n\n#include \"opencv2/core/cvdef.h\"\n\nnamespace cv\n{\n\n//! @addtogroup core_basic\n//! @{\n\n/** @brief Template \"trait\" class for OpenCV primitive data types.\n\nA primitive OpenCV data type is one of unsigned char, bool, signed char, unsigned short, signed\nshort, int, float, double, or a tuple of values of one of these types, where all the values in the\ntuple have the same type. Any primitive type from the list can be defined by an identifier in the\nform CV_\\<bit-depth\\>{U|S|F}C(\\<number_of_channels\\>), for example: uchar \\~ CV_8UC1, 3-element\nfloating-point tuple \\~ CV_32FC3, and so on. A universal OpenCV structure that is able to store a\nsingle instance of such a primitive data type is Vec. Multiple instances of such a type can be\nstored in a std::vector, Mat, Mat_, SparseMat, SparseMat_, or any other container that is able to\nstore Vec instances.\n\nThe DataType class is basically used to provide a description of such primitive data types without\nadding any fields or methods to the corresponding classes (and it is actually impossible to add\nanything to primitive C/C++ data types). This technique is known in C++ as class traits. 
It is not\nDataType itself that is used but its specialized versions, such as:\n@code\n    template<> class DataType<uchar>\n    {\n        typedef uchar value_type;\n        typedef int work_type;\n        typedef uchar channel_type;\n        enum { depth = CV_8U, channels = 1, fmt='u', type = CV_8U };\n    };\n    ...\n    template<typename _Tp> class DataType<std::complex<_Tp> >\n    {\n        typedef std::complex<_Tp> value_type;\n        typedef std::complex<_Tp> work_type;\n        typedef _Tp channel_type;\n        // DataDepth is another helper trait class\n        enum { depth = DataDepth<_Tp>::value, channels=2,\n            fmt=(channels-1)*256+DataDepth<_Tp>::fmt,\n            type=CV_MAKETYPE(depth, channels) };\n    };\n    ...\n@endcode\nThe main purpose of this class is to convert compilation-time type information to an\nOpenCV-compatible data type identifier, for example:\n@code\n    // allocates a 30x40 floating-point matrix\n    Mat A(30, 40, DataType<float>::type);\n\n    Mat B = Mat_<std::complex<double> >(3, 3);\n    // the statement below will print 6, 2, that is depth == CV_64F, channels == 2\n    cout << B.depth() << \", \" << B.channels() << endl;\n@endcode\nSo, such traits are used to tell OpenCV which data type you are working with, even if such a type is\nnot native to OpenCV. For example, the matrix B initialization above is compiled because OpenCV\ndefines the proper specialized template class DataType\\<complex\\<_Tp\\> \\>. This mechanism is also\nuseful (and used in OpenCV this way) for generic algorithm implementations.\n*/\ntemplate<typename _Tp> class DataType\n{\npublic:\n    typedef _Tp         value_type;\n    typedef value_type  work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 1,\n           depth        = -1,\n           channels     = 1,\n           fmt          = 0,\n           type = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<bool>\n{\npublic:\n    typedef bool        value_type;\n    typedef int         work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_8U,\n           channels     = 1,\n           fmt          = (int)'u',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<uchar>\n{\npublic:\n    typedef uchar       value_type;\n    typedef int         work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_8U,\n           channels     = 1,\n           fmt          = (int)'u',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<schar>\n{\npublic:\n    typedef schar       value_type;\n    typedef int         work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_8S,\n           channels     = 1,\n           fmt          = (int)'c',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<char>\n{\npublic:\n    typedef schar       value_type;\n    typedef int         work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_8S,\n           channels     = 1,\n           fmt          = (int)'c',\n           type         = CV_MAKETYPE(depth, channels)\n   
      };\n};\n\ntemplate<> class DataType<ushort>\n{\npublic:\n    typedef ushort      value_type;\n    typedef int         work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_16U,\n           channels     = 1,\n           fmt          = (int)'w',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<short>\n{\npublic:\n    typedef short       value_type;\n    typedef int         work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_16S,\n           channels     = 1,\n           fmt          = (int)'s',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<int>\n{\npublic:\n    typedef int         value_type;\n    typedef value_type  work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_32S,\n           channels     = 1,\n           fmt          = (int)'i',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<float>\n{\npublic:\n    typedef float       value_type;\n    typedef value_type  work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_32F,\n           channels     = 1,\n           fmt          = (int)'f',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\ntemplate<> class DataType<double>\n{\npublic:\n    typedef double      value_type;\n    typedef value_type  work_type;\n    typedef value_type  channel_type;\n    typedef value_type  vec_type;\n    enum { generic_type = 0,\n           depth        = CV_64F,\n           channels     = 1,\n           fmt          = (int)'d',\n           type         = CV_MAKETYPE(depth, channels)\n         };\n};\n\n\n/** @brief A helper class for cv::DataType\n\nThe class is specialized for each fundamental numerical data type supported by OpenCV. It provides\nDataDepth<T>::value constant.\n*/\ntemplate<typename _Tp> class DataDepth\n{\npublic:\n    enum\n    {\n        value = DataType<_Tp>::depth,\n        fmt   = DataType<_Tp>::fmt\n    };\n};\n\n\n\ntemplate<int _depth> class TypeDepth\n{\n    enum { depth = CV_USRTYPE1 };\n    typedef void value_type;\n};\n\ntemplate<> class TypeDepth<CV_8U>\n{\n    enum { depth = CV_8U };\n    typedef uchar value_type;\n};\n\ntemplate<> class TypeDepth<CV_8S>\n{\n    enum { depth = CV_8S };\n    typedef schar value_type;\n};\n\ntemplate<> class TypeDepth<CV_16U>\n{\n    enum { depth = CV_16U };\n    typedef ushort value_type;\n};\n\ntemplate<> class TypeDepth<CV_16S>\n{\n    enum { depth = CV_16S };\n    typedef short value_type;\n};\n\ntemplate<> class TypeDepth<CV_32S>\n{\n    enum { depth = CV_32S };\n    typedef int value_type;\n};\n\ntemplate<> class TypeDepth<CV_32F>\n{\n    enum { depth = CV_32F };\n    typedef float value_type;\n};\n\ntemplate<> class TypeDepth<CV_64F>\n{\n    enum { depth = CV_64F };\n    typedef double value_type;\n};\n\n//! @}\n\n} // cv\n\n#endif // __OPENCV_CORE_TRAITS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/types.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_TYPES_HPP__\n#define __OPENCV_CORE_TYPES_HPP__\n\n#ifndef __cplusplus\n#  error types.hpp header must be compiled as C++\n#endif\n\n#include <climits>\n#include <cfloat>\n#include <vector>\n\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/cvstd.hpp\"\n#include \"opencv2/core/matx.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup core_basic\n//! @{\n\n//////////////////////////////// Complex //////////////////////////////\n\n/** @brief  A complex number class.\n\n  The template class is similar and compatible with std::complex, however it provides slightly\n  more convenient access to the real and imaginary parts using through the simple field access, as opposite\n  to std::complex::real() and std::complex::imag().\n*/\ntemplate<typename _Tp> class Complex\n{\npublic:\n\n    //! constructors\n    Complex();\n    Complex( _Tp _re, _Tp _im = 0 );\n\n    //! conversion to another data type\n    template<typename T2> operator Complex<T2>() const;\n    //! 
conjugation\n    Complex conj() const;\n\n    _Tp re, im; //< the real and the imaginary parts\n};\n\ntypedef Complex<float> Complexf;\ntypedef Complex<double> Complexd;\n\ntemplate<typename _Tp> class DataType< Complex<_Tp> >\n{\npublic:\n    typedef Complex<_Tp> value_type;\n    typedef value_type   work_type;\n    typedef _Tp          channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 2,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels) };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n//////////////////////////////// Point_ ////////////////////////////////\n\n/** @brief Template class for 2D points specified by its coordinates `x` and `y`.\n\nAn instance of the class is interchangeable with C structures, CvPoint and CvPoint2D32f . There is\nalso a cast operator to convert point coordinates to the specified type. The conversion from\nfloating-point coordinates to integer coordinates is done by rounding. Commonly, the conversion\nuses this operation for each of the coordinates. Besides the class members listed in the\ndeclaration above, the following operations on points are implemented:\n@code\n    pt1 = pt2 + pt3;\n    pt1 = pt2 - pt3;\n    pt1 = pt2 * a;\n    pt1 = a * pt2;\n    pt1 = pt2 / a;\n    pt1 += pt2;\n    pt1 -= pt2;\n    pt1 *= a;\n    pt1 /= a;\n    double value = norm(pt); // L2 norm\n    pt1 == pt2;\n    pt1 != pt2;\n@endcode\nFor your convenience, the following type aliases are defined:\n@code\n    typedef Point_<int> Point2i;\n    typedef Point2i Point;\n    typedef Point_<float> Point2f;\n    typedef Point_<double> Point2d;\n@endcode\nExample:\n@code\n    Point2f a(0.3f, 0.f), b(0.f, 0.4f);\n    Point pt = (a + b)*10.f;\n    cout << pt.x << \", \" << pt.y << endl;\n@endcode\n*/\ntemplate<typename _Tp> class Point_\n{\npublic:\n    typedef _Tp value_type;\n\n    // various constructors\n    Point_();\n    Point_(_Tp _x, _Tp _y);\n    Point_(const Point_& pt);\n    Point_(const Size_<_Tp>& sz);\n    Point_(const Vec<_Tp, 2>& v);\n\n    Point_& operator = (const Point_& pt);\n    //! conversion to another data type\n    template<typename _Tp2> operator Point_<_Tp2>() const;\n\n    //! conversion to the old-style C structures\n    operator Vec<_Tp, 2>() const;\n\n    //! dot product\n    _Tp dot(const Point_& pt) const;\n    //! dot product computed in double-precision arithmetics\n    double ddot(const Point_& pt) const;\n    //! cross-product\n    double cross(const Point_& pt) const;\n    //! 
checks whether the point is inside the specified rectangle\n    bool inside(const Rect_<_Tp>& r) const;\n\n    _Tp x, y; //< the point coordinates\n};\n\ntypedef Point_<int> Point2i;\ntypedef Point_<float> Point2f;\ntypedef Point_<double> Point2d;\ntypedef Point2i Point;\n\ntemplate<typename _Tp> class DataType< Point_<_Tp> >\n{\npublic:\n    typedef Point_<_Tp>                               value_type;\n    typedef Point_<typename DataType<_Tp>::work_type> work_type;\n    typedef _Tp                                       channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 2,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n//////////////////////////////// Point3_ ////////////////////////////////\n\n/** @brief Template class for 3D points specified by its coordinates `x`, `y` and `z`.\n\nAn instance of the class is interchangeable with the C structure CvPoint3D32f. Similarly to\nPoint_ , the coordinates of 3D points can be converted to another type. The vector arithmetic and\ncomparison operations are also supported.\n\nThe following Point3_\\<\\> aliases are available:\n@code\n    typedef Point3_<int> Point3i;\n    typedef Point3_<float> Point3f;\n    typedef Point3_<double> Point3d;\n@endcode\n@see cv::Point3i, cv::Point3f and cv::Point3d\n*/\ntemplate<typename _Tp> class Point3_\n{\npublic:\n    typedef _Tp value_type;\n\n    // various constructors\n    Point3_();\n    Point3_(_Tp _x, _Tp _y, _Tp _z);\n    Point3_(const Point3_& pt);\n    explicit Point3_(const Point_<_Tp>& pt);\n    Point3_(const Vec<_Tp, 3>& v);\n\n    Point3_& operator = (const Point3_& pt);\n    //! conversion to another data type\n    template<typename _Tp2> operator Point3_<_Tp2>() const;\n    //! conversion to cv::Vec<>\n    operator Vec<_Tp, 3>() const;\n\n    //! dot product\n    _Tp dot(const Point3_& pt) const;\n    //! dot product computed in double-precision arithmetics\n    double ddot(const Point3_& pt) const;\n    //! cross product of the 2 3D points\n    Point3_ cross(const Point3_& pt) const;\n\n    _Tp x, y, z; //< the point coordinates\n};\n\ntypedef Point3_<int> Point3i;\ntypedef Point3_<float> Point3f;\ntypedef Point3_<double> Point3d;\n\ntemplate<typename _Tp> class DataType< Point3_<_Tp> >\n{\npublic:\n    typedef Point3_<_Tp>                               value_type;\n    typedef Point3_<typename DataType<_Tp>::work_type> work_type;\n    typedef _Tp                                        channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 3,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n//////////////////////////////// Size_ ////////////////////////////////\n\n/** @brief Template class for specifying the size of an image or rectangle.\n\nThe class includes two members called width and height. The structure can be converted to and from\nthe old OpenCV structures CvSize and CvSize2D32f. 
The same set of arithmetic and comparison\noperations as for Point_ is available.\n\nOpenCV defines the following Size_\\<\\> aliases:\n@code\n    typedef Size_<int> Size2i;\n    typedef Size2i Size;\n    typedef Size_<float> Size2f;\n@endcode\n*/\ntemplate<typename _Tp> class Size_\n{\npublic:\n    typedef _Tp value_type;\n\n    //! various constructors\n    Size_();\n    Size_(_Tp _width, _Tp _height);\n    Size_(const Size_& sz);\n    Size_(const Point_<_Tp>& pt);\n\n    Size_& operator = (const Size_& sz);\n    //! the area (width*height)\n    _Tp area() const;\n\n    //! conversion to another data type.\n    template<typename _Tp2> operator Size_<_Tp2>() const;\n\n    _Tp width, height; // the width and the height\n};\n\ntypedef Size_<int> Size2i;\ntypedef Size_<float> Size2f;\ntypedef Size_<double> Size2d;\ntypedef Size2i Size;\n\ntemplate<typename _Tp> class DataType< Size_<_Tp> >\n{\npublic:\n    typedef Size_<_Tp>                               value_type;\n    typedef Size_<typename DataType<_Tp>::work_type> work_type;\n    typedef _Tp                                      channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 2,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n//////////////////////////////// Rect_ ////////////////////////////////\n\n/** @brief Template class for 2D rectangles\n\ndescribed by the following parameters:\n-   Coordinates of the top-left corner. This is a default interpretation of Rect_::x and Rect_::y\n    in OpenCV. However, in your algorithms you may count x and y from the bottom-left corner.\n-   Rectangle width and height.\n\nOpenCV typically assumes that the top and left boundary of the rectangle are inclusive, while the\nright and bottom boundaries are not. For example, the method Rect_::contains returns true if\n\n\\f[x  \\leq pt.x < x+width,\n      y  \\leq pt.y < y+height\\f]\n\nVirtually every loop over an image ROI in OpenCV (where ROI is specified by Rect_\\<int\\> ) is\nimplemented as:\n@code\n    for(int y = roi.y; y < roi.y + roi.height; y++)\n        for(int x = roi.x; x < roi.x + roi.width; x++)\n        {\n            // ...\n        }\n@endcode\nIn addition to the class members, the following operations on rectangles are implemented:\n-   \\f$\\texttt{rect} = \\texttt{rect} \\pm \\texttt{point}\\f$ (shifting a rectangle by a certain offset)\n-   \\f$\\texttt{rect} = \\texttt{rect} \\pm \\texttt{size}\\f$ (expanding or shrinking a rectangle by a\n    certain amount)\n-   rect += point, rect -= point, rect += size, rect -= size (augmenting operations)\n-   rect = rect1 & rect2 (rectangle intersection)\n-   rect = rect1 | rect2 (minimum area rectangle containing rect1 and rect2 )\n-   rect &= rect1, rect |= rect1 (and the corresponding augmenting operations)\n-   rect == rect1, rect != rect1 (rectangle comparison)\n\nThis is an example of how the partial ordering on rectangles can be established (rect1 \\f$\\subseteq\\f$\nrect2):\n@code\n    template<typename _Tp> inline bool\n    operator <= (const Rect_<_Tp>& r1, const Rect_<_Tp>& r2)\n    {\n        return (r1 & r2) == r1;\n    }\n@endcode\nFor your convenience, the Rect_\\<\\> alias is available: cv::Rect\n*/\ntemplate<typename _Tp> class Rect_\n{\npublic:\n    typedef _Tp value_type;\n\n    //! 
various constructors\n    Rect_();\n    Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);\n    Rect_(const Rect_& r);\n    Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz);\n    Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2);\n\n    Rect_& operator = ( const Rect_& r );\n    //! the top-left corner\n    Point_<_Tp> tl() const;\n    //! the bottom-right corner\n    Point_<_Tp> br() const;\n\n    //! size (width, height) of the rectangle\n    Size_<_Tp> size() const;\n    //! area (width*height) of the rectangle\n    _Tp area() const;\n\n    //! conversion to another data type\n    template<typename _Tp2> operator Rect_<_Tp2>() const;\n\n    //! checks whether the rectangle contains the point\n    bool contains(const Point_<_Tp>& pt) const;\n\n    _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle\n};\n\ntypedef Rect_<int> Rect2i;\ntypedef Rect_<float> Rect2f;\ntypedef Rect_<double> Rect2d;\ntypedef Rect2i Rect;\n\ntemplate<typename _Tp> class DataType< Rect_<_Tp> >\n{\npublic:\n    typedef Rect_<_Tp>                               value_type;\n    typedef Rect_<typename DataType<_Tp>::work_type> work_type;\n    typedef _Tp                                      channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 4,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n///////////////////////////// RotatedRect /////////////////////////////\n\n/** @brief The class represents rotated (i.e. not up-right) rectangles on a plane.\n\nEach rectangle is specified by the center point (mass center), length of each side (represented by\ncv::Size2f structure) and the rotation angle in degrees.\n\nThe sample below demonstrates how to use RotatedRect:\n@code\n    Mat image(200, 200, CV_8UC3, Scalar(0));\n    RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);\n\n    Point2f vertices[4];\n    rRect.points(vertices);\n    for (int i = 0; i < 4; i++)\n        line(image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0));\n\n    Rect brect = rRect.boundingRect();\n    rectangle(image, brect, Scalar(255,0,0));\n\n    imshow(\"rectangles\", image);\n    waitKey(0);\n@endcode\n![image](pics/rotatedrect.png)\n\n@sa CamShift, fitEllipse, minAreaRect, CvBox2D\n*/\nclass CV_EXPORTS RotatedRect\n{\npublic:\n    //! various constructors\n    RotatedRect();\n    /**\n    @param center The rectangle mass center.\n    @param size Width and height of the rectangle.\n    @param angle The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc.,\n    the rectangle becomes an up-right rectangle.\n    */\n    RotatedRect(const Point2f& center, const Size2f& size, float angle);\n    /**\n    Any 3 end points of the RotatedRect. They must be given in order (either clockwise or\n    anticlockwise).\n     */\n    RotatedRect(const Point2f& point1, const Point2f& point2, const Point2f& point3);\n\n    /** returns 4 vertices of the rectangle\n    @param pts The points array for storing rectangle vertices.\n    */\n    void points(Point2f pts[]) const;\n    //! 
returns the minimal up-right rectangle containing the rotated rectangle\n    Rect boundingRect() const;\n\n    Point2f center; //< the rectangle mass center\n    Size2f size;    //< width and height of the rectangle\n    float angle;    //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.\n};\n\ntemplate<> class DataType< RotatedRect >\n{\npublic:\n    typedef RotatedRect  value_type;\n    typedef value_type   work_type;\n    typedef float        channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = (int)sizeof(value_type)/sizeof(channel_type), // 5\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n//////////////////////////////// Range /////////////////////////////////\n\n/** @brief Template class specifying a continuous subsequence (slice) of a sequence.\n\nThe class is used to specify a row or a column span in a matrix ( Mat ) and for many other purposes.\nRange(a,b) is basically the same as a:b in Matlab or a..b in Python. As in Python, start is an\ninclusive left boundary of the range and end is an exclusive right boundary of the range. Such a\nhalf-opened interval is usually denoted as \\f$[start,end)\\f$ .\n\nThe static method Range::all() returns a special variable that means \"the whole sequence\" or \"the\nwhole range\", just like \" : \" in Matlab or \" ... \" in Python. All the methods and functions in\nOpenCV that take Range support this special Range::all() value. But, of course, in case of your own\ncustom processing, you will probably have to check and handle it explicitly:\n@code\n    void my_function(..., const Range& r, ....)\n    {\n        if(r == Range::all()) {\n            // process all the data\n        }\n        else {\n            // process [r.start, r.end)\n        }\n    }\n@endcode\n*/\nclass CV_EXPORTS Range\n{\npublic:\n    Range();\n    Range(int _start, int _end);\n    int size() const;\n    bool empty() const;\n    static Range all();\n\n    int start, end;\n};\n\ntemplate<> class DataType<Range>\n{\npublic:\n    typedef Range      value_type;\n    typedef value_type work_type;\n    typedef int        channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 2,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n//////////////////////////////// Scalar_ ///////////////////////////////\n\n/** @brief Template class for a 4-element vector derived from Vec.\n\nBeing derived from Vec\\<_Tp, 4\\> , Scalar_ and Scalar can be used just as typical 4-element\nvectors. In addition, they can be converted to/from CvScalar . The type Scalar is widely used in\nOpenCV to pass pixel values.\n*/\ntemplate<typename _Tp> class Scalar_ : public Vec<_Tp, 4>\n{\npublic:\n    //! various constructors\n    Scalar_();\n    Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0);\n    Scalar_(_Tp v0);\n\n    template<typename _Tp2, int cn>\n    Scalar_(const Vec<_Tp2, cn>& v);\n\n    //! returns a scalar with all elements set to v0\n    static Scalar_<_Tp> all(_Tp v0);\n\n    //! 
conversion to another data type\n    template<typename T2> operator Scalar_<T2>() const;\n\n    //! per-element product\n    Scalar_<_Tp> mul(const Scalar_<_Tp>& a, double scale=1 ) const;\n\n    // returns (v0, -v1, -v2, -v3)\n    Scalar_<_Tp> conj() const;\n\n    // returns true iff v1 == v2 == v3 == 0\n    bool isReal() const;\n};\n\ntypedef Scalar_<double> Scalar;\n\ntemplate<typename _Tp> class DataType< Scalar_<_Tp> >\n{\npublic:\n    typedef Scalar_<_Tp>                               value_type;\n    typedef Scalar_<typename DataType<_Tp>::work_type> work_type;\n    typedef _Tp                                        channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = 4,\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n/////////////////////////////// KeyPoint ////////////////////////////////\n\n/** @brief Data structure for salient point detectors.\n\nThe class instance stores a keypoint, i.e. a point feature found by one of many available keypoint\ndetectors, such as Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT,\ncv::LDetector etc.\n\nThe keypoint is characterized by the 2D position, scale (proportional to the diameter of the\nneighborhood that needs to be taken into account), orientation and some other parameters. The\nkeypoint neighborhood is then analyzed by another algorithm that builds a descriptor (usually\nrepresented as a feature vector). The keypoints representing the same object in different images\ncan then be matched using cv::KDTree or another method.\n*/\nclass CV_EXPORTS_W_SIMPLE KeyPoint\n{\npublic:\n    //! the default constructor\n    CV_WRAP KeyPoint();\n    /**\n    @param _pt x & y coordinates of the keypoint\n    @param _size keypoint diameter\n    @param _angle keypoint orientation\n    @param _response keypoint detector response on the keypoint (that is, strength of the keypoint)\n    @param _octave pyramid octave in which the keypoint has been detected\n    @param _class_id object id\n     */\n    KeyPoint(Point2f _pt, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1);\n    /**\n    @param x x-coordinate of the keypoint\n    @param y y-coordinate of the keypoint\n    @param _size keypoint diameter\n    @param _angle keypoint orientation\n    @param _response keypoint detector response on the keypoint (that is, strength of the keypoint)\n    @param _octave pyramid octave in which the keypoint has been detected\n    @param _class_id object id\n     */\n    CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1);\n\n    size_t hash() const;\n\n    /**\n    This method converts vector of keypoints to vector of points or the reverse, where each keypoint is\n    assigned the same size and the same orientation.\n\n    @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB\n    @param points2f Array of (x,y) coordinates of each keypoint\n    @param keypointIndexes Array of indexes of keypoints to be converted to points. 
(Acts like a mask to\n    convert only specified keypoints)\n    */\n    CV_WRAP static void convert(const std::vector<KeyPoint>& keypoints,\n                                CV_OUT std::vector<Point2f>& points2f,\n                                const std::vector<int>& keypointIndexes=std::vector<int>());\n    /** @overload\n    @param points2f Array of (x,y) coordinates of each keypoint\n    @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB\n    @param size keypoint diameter\n    @param response keypoint detector response on the keypoint (that is, strength of the keypoint)\n    @param octave pyramid octave in which the keypoint has been detected\n    @param class_id object id\n    */\n    CV_WRAP static void convert(const std::vector<Point2f>& points2f,\n                                CV_OUT std::vector<KeyPoint>& keypoints,\n                                float size=1, float response=1, int octave=0, int class_id=-1);\n\n    /**\n    This method computes overlap for pair of keypoints. Overlap is the ratio between area of keypoint\n    regions' intersection and area of keypoint regions' union (considering keypoint region as circle).\n    If they don't overlap, we get zero. If they coincide at same location with same size, we get 1.\n    @param kp1 First keypoint\n    @param kp2 Second keypoint\n    */\n    CV_WRAP static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);\n\n    CV_PROP_RW Point2f pt; //!< coordinates of the keypoints\n    CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood\n    CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable);\n                            //!< it's in [0,360) degrees and measured relative to\n                            //!< image coordinate system, ie in clockwise.\n    CV_PROP_RW float response; //!< the response by which the most strong keypoints have been selected. 
Can be used for the further sorting or subsampling\n    CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted\n    CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to)\n};\n\ntemplate<> class DataType<KeyPoint>\n{\npublic:\n    typedef KeyPoint      value_type;\n    typedef float         work_type;\n    typedef float         channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = (int)(sizeof(value_type)/sizeof(channel_type)), // 7\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n//////////////////////////////// DMatch /////////////////////////////////\n\n/** @brief Class for matching keypoint descriptors\n\nquery descriptor index, train descriptor index, train image index, and distance between\ndescriptors.\n*/\nclass CV_EXPORTS_W_SIMPLE DMatch\n{\npublic:\n    CV_WRAP DMatch();\n    CV_WRAP DMatch(int _queryIdx, int _trainIdx, float _distance);\n    CV_WRAP DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance);\n\n    CV_PROP_RW int queryIdx; // query descriptor index\n    CV_PROP_RW int trainIdx; // train descriptor index\n    CV_PROP_RW int imgIdx;   // train image index\n\n    CV_PROP_RW float distance;\n\n    // less is better\n    bool operator<(const DMatch &m) const;\n};\n\ntemplate<> class DataType<DMatch>\n{\npublic:\n    typedef DMatch      value_type;\n    typedef int         work_type;\n    typedef int         channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = (int)(sizeof(value_type)/sizeof(channel_type)), // 4\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n\n\n///////////////////////////// TermCriteria //////////////////////////////\n\n/** @brief The class defining termination criteria for iterative algorithms.\n\nYou can initialize it by default constructor and then override any parameters, or the structure may\nbe fully initialized using the advanced variant of the constructor.\n*/\nclass CV_EXPORTS TermCriteria\n{\npublic:\n    /**\n      Criteria type, can be one of: COUNT, EPS or COUNT + EPS\n    */\n    enum Type\n    {\n        COUNT=1, //!< the maximum number of iterations or elements to compute\n        MAX_ITER=COUNT, //!< ditto\n        EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops\n    };\n\n    //! default constructor\n    TermCriteria();\n    /**\n    @param type The type of termination criteria, one of TermCriteria::Type\n    @param maxCount The maximum number of iterations or elements to compute.\n    @param epsilon The desired accuracy or change in parameters at which the iterative algorithm stops.\n    */\n    TermCriteria(int type, int maxCount, double epsilon);\n\n    int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS\n    int maxCount; // the maximum number of iterations/elements\n    double epsilon; // the desired accuracy\n};\n\n\n//! @} core_basic\n\n///////////////////////// raster image moments //////////////////////////\n\n//! @addtogroup imgproc_shape\n//! 
@{\n\n/** @brief struct returned by cv::moments\n\nThe spatial moments \\f$\\texttt{Moments::m}_{ji}\\f$ are computed as:\n\n\\f[\\texttt{m} _{ji}= \\sum _{x,y}  \\left ( \\texttt{array} (x,y)  \\cdot x^j  \\cdot y^i \\right )\\f]\n\nThe central moments \\f$\\texttt{Moments::mu}_{ji}\\f$ are computed as:\n\n\\f[\\texttt{mu} _{ji}= \\sum _{x,y}  \\left ( \\texttt{array} (x,y)  \\cdot (x -  \\bar{x} )^j  \\cdot (y -  \\bar{y} )^i \\right )\\f]\n\nwhere \\f$(\\bar{x}, \\bar{y})\\f$ is the mass center:\n\n\\f[\\bar{x} = \\frac{\\texttt{m}_{10}}{\\texttt{m}_{00}} , \\; \\bar{y} = \\frac{\\texttt{m}_{01}}{\\texttt{m}_{00}}\\f]\n\nThe normalized central moments \\f$\\texttt{Moments::nu}_{ji}\\f$ are computed as:\n\n\\f[\\texttt{nu} _{ji}= \\frac{\\texttt{mu}_{ji}}{\\texttt{m}_{00}^{(i+j)/2+1}} .\\f]\n\n@note\n\\f$\\texttt{mu}_{00}=\\texttt{m}_{00}\\f$, \\f$\\texttt{nu}_{00}=1\\f$,\n\\f$\\texttt{nu}_{10}=\\texttt{nu}_{01}=\\texttt{mu}_{10}=\\texttt{mu}_{01}=0\\f$, hence the values are not\nstored.\n\nThe moments of a contour are defined in the same way but computed using Green's formula (see\n<http://en.wikipedia.org/wiki/Green_theorem>). So, due to a limited raster resolution, the moments\ncomputed for a contour are slightly different from the moments computed for the same rasterized\ncontour.\n\n@note\nSince the contour moments are computed using Green's formula, you may get seemingly odd results for\ncontours with self-intersections, e.g. a zero area (m00) for butterfly-shaped contours.\n */\nclass CV_EXPORTS_W_MAP Moments\n{\npublic:\n    //! the default constructor\n    Moments();\n    //! the full constructor\n    Moments(double m00, double m10, double m01, double m20, double m11,\n            double m02, double m30, double m21, double m12, double m03 );\n    ////! the conversion from CvMoments\n    //Moments( const CvMoments& moments );\n    ////! the conversion to CvMoments\n    //operator CvMoments() const;\n\n    //! @name spatial moments\n    //! @{\n    CV_PROP_RW double  m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;\n    //! @}\n\n    //! @name central moments\n    //! @{\n    CV_PROP_RW double  mu20, mu11, mu02, mu30, mu21, mu12, mu03;\n    //! @}\n\n    //! @name central normalized moments\n    //! @{\n    CV_PROP_RW double  nu20, nu11, nu02, nu30, nu21, nu12, nu03;\n    //! @}\n};\n\ntemplate<> class DataType<Moments>\n{\npublic:\n    typedef Moments     value_type;\n    typedef double      work_type;\n    typedef double      channel_type;\n\n    enum { generic_type = 0,\n           depth        = DataType<channel_type>::depth,\n           channels     = (int)(sizeof(value_type)/sizeof(channel_type)), // 24\n           fmt          = DataType<channel_type>::fmt + ((channels - 1) << 8),\n           type         = CV_MAKETYPE(depth, channels)\n         };\n\n    typedef Vec<channel_type, channels> vec_type;\n};\n\n//! @} imgproc_shape\n\n//! 
@cond IGNORED\n\n/////////////////////////////////////////////////////////////////////////\n///////////////////////////// Implementation ////////////////////////////\n/////////////////////////////////////////////////////////////////////////\n\n//////////////////////////////// Complex ////////////////////////////////\n\ntemplate<typename _Tp> inline\nComplex<_Tp>::Complex()\n    : re(0), im(0) {}\n\ntemplate<typename _Tp> inline\nComplex<_Tp>::Complex( _Tp _re, _Tp _im )\n    : re(_re), im(_im) {}\n\ntemplate<typename _Tp> template<typename T2> inline\nComplex<_Tp>::operator Complex<T2>() const\n{\n    return Complex<T2>(saturate_cast<T2>(re), saturate_cast<T2>(im));\n}\n\ntemplate<typename _Tp> inline\nComplex<_Tp> Complex<_Tp>::conj() const\n{\n    return Complex<_Tp>(re, -im);\n}\n\n\ntemplate<typename _Tp> static inline\nbool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    return a.re == b.re && a.im == b.im;\n}\n\ntemplate<typename _Tp> static inline\nbool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    return a.re != b.re || a.im != b.im;\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    return Complex<_Tp>( a.re + b.re, a.im + b.im );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    a.re += b.re; a.im += b.im;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    return Complex<_Tp>( a.re - b.re, a.im - b.im );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    a.re -= b.re; a.im -= b.im;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator - (const Complex<_Tp>& a)\n{\n    return Complex<_Tp>(-a.re, -a.im);\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator * (const Complex<_Tp>& a, _Tp b)\n{\n    return Complex<_Tp>( a.re*b, a.im*b );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator * (_Tp b, const Complex<_Tp>& a)\n{\n    return Complex<_Tp>( a.re*b, a.im*b );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator + (const Complex<_Tp>& a, _Tp b)\n{\n    return Complex<_Tp>( a.re + b, a.im );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator - (const Complex<_Tp>& a, _Tp b)\n{ return Complex<_Tp>( a.re - b, a.im ); }\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator + (_Tp b, const Complex<_Tp>& a)\n{\n    return Complex<_Tp>( a.re + b, a.im );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator - (_Tp b, const Complex<_Tp>& a)\n{\n    return Complex<_Tp>( b - a.re, -a.im );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp>& operator += (Complex<_Tp>& a, _Tp b)\n{\n    a.re += b; return a;\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b)\n{\n    a.re -= b; return a;\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b)\n{\n    a.re *= b; a.im *= b; return a;\n}\n\ntemplate<typename _Tp> static inline\ndouble abs(const Complex<_Tp>& a)\n{\n    return std::sqrt( (double)a.re*a.re + (double)a.im*a.im);\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> 
operator / (const Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    double t = 1./((double)b.re*b.re + (double)b.im*b.im);\n    return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t),\n                        (_Tp)((-a.re*b.im + a.im*b.re)*t) );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b)\n{\n    return (a = a / b);\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator / (const Complex<_Tp>& a, _Tp b)\n{\n    _Tp t = (_Tp)1/b;\n    return Complex<_Tp>( a.re*t, a.im*t );\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp> operator / (_Tp b, const Complex<_Tp>& a)\n{\n    return Complex<_Tp>(b)/a;\n}\n\ntemplate<typename _Tp> static inline\nComplex<_Tp>& operator /= (Complex<_Tp>& a, _Tp b)\n{\n    // 'a' must be taken by non-const reference so that the quotient is written back\n    _Tp t = (_Tp)1/b;\n    a.re *= t; a.im *= t; return a;\n}\n\n\n\n//////////////////////////////// 2D Point ///////////////////////////////\n\ntemplate<typename _Tp> inline\nPoint_<_Tp>::Point_()\n    : x(0), y(0) {}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp>::Point_(_Tp _x, _Tp _y)\n    : x(_x), y(_y) {}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp>::Point_(const Point_& pt)\n    : x(pt.x), y(pt.y) {}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp>::Point_(const Size_<_Tp>& sz)\n    : x(sz.width), y(sz.height) {}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp>::Point_(const Vec<_Tp,2>& v)\n    : x(v[0]), y(v[1]) {}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp>& Point_<_Tp>::operator = (const Point_& pt)\n{\n    x = pt.x; y = pt.y;\n    return *this;\n}\n\ntemplate<typename _Tp> template<typename _Tp2> inline\nPoint_<_Tp>::operator Point_<_Tp2>() const\n{\n    return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y));\n}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp>::operator Vec<_Tp, 2>() const\n{\n    return Vec<_Tp, 2>(x, y);\n}\n\ntemplate<typename _Tp> inline\n_Tp Point_<_Tp>::dot(const Point_& pt) const\n{\n    return saturate_cast<_Tp>(x*pt.x + y*pt.y);\n}\n\ntemplate<typename _Tp> inline\ndouble Point_<_Tp>::ddot(const Point_& pt) const\n{\n    return (double)x*pt.x + (double)y*pt.y;\n}\n\ntemplate<typename _Tp> inline\ndouble Point_<_Tp>::cross(const Point_& pt) const\n{\n    return (double)x*pt.y - (double)y*pt.x;\n}\n\ntemplate<typename _Tp> inline bool\nPoint_<_Tp>::inside( const Rect_<_Tp>& r ) const\n{\n    return r.contains(*this);\n}\n\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator += (Point_<_Tp>& a, const Point_<_Tp>& b)\n{\n    a.x += b.x;\n    a.y += b.y;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator -= (Point_<_Tp>& a, const Point_<_Tp>& b)\n{\n    a.x -= b.x;\n    a.y -= b.y;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator *= (Point_<_Tp>& a, int b)\n{\n    a.x = saturate_cast<_Tp>(a.x * b);\n    a.y = saturate_cast<_Tp>(a.y * b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator *= (Point_<_Tp>& a, float b)\n{\n    a.x = saturate_cast<_Tp>(a.x * b);\n    a.y = saturate_cast<_Tp>(a.y * b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator *= (Point_<_Tp>& a, double b)\n{\n    a.x = saturate_cast<_Tp>(a.x * b);\n    a.y = saturate_cast<_Tp>(a.y * b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator /= (Point_<_Tp>& a, int b)\n{\n    a.x = saturate_cast<_Tp>(a.x / b);\n    a.y = saturate_cast<_Tp>(a.y / b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator /= (Point_<_Tp>& a, 
float b)\n{\n    a.x = saturate_cast<_Tp>(a.x / b);\n    a.y = saturate_cast<_Tp>(a.y / b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp>& operator /= (Point_<_Tp>& a, double b)\n{\n    a.x = saturate_cast<_Tp>(a.x / b);\n    a.y = saturate_cast<_Tp>(a.y / b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\ndouble norm(const Point_<_Tp>& pt)\n{\n    return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y);\n}\n\ntemplate<typename _Tp> static inline\nbool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b)\n{\n    return a.x == b.x && a.y == b.y;\n}\n\ntemplate<typename _Tp> static inline\nbool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b)\n{\n    return a.x != b.x || a.y != b.y;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator - (const Point_<_Tp>& a)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator * (const Point_<_Tp>& a, int b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator * (int a, const Point_<_Tp>& b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator * (const Point_<_Tp>& a, float b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator * (float a, const Point_<_Tp>& b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator * (const Point_<_Tp>& a, double b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator * (double a, const Point_<_Tp>& b)\n{\n    return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) );\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b)\n{\n    Matx<_Tp, 2, 1> tmp = a * Vec<_Tp,2>(b.x, b.y);\n    return Point_<_Tp>(tmp.val[0], tmp.val[1]);\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b)\n{\n    Matx<_Tp, 3, 1> tmp = a * Vec<_Tp,3>(b.x, b.y, 1);\n    return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator / (const Point_<_Tp>& a, int b)\n{\n    Point_<_Tp> tmp(a);\n    tmp /= b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator / (const Point_<_Tp>& a, float b)\n{\n    Point_<_Tp> tmp(a);\n    tmp /= b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nPoint_<_Tp> operator / (const Point_<_Tp>& a, double b)\n{\n    Point_<_Tp> tmp(a);\n    tmp /= b;\n    return tmp;\n}\n\n\n\n//////////////////////////////// 3D Point ///////////////////////////////\n\ntemplate<typename _Tp> inline\nPoint3_<_Tp>::Point3_()\n    : x(0), y(0), z(0) {}\n\ntemplate<typename _Tp> 
inline\nPoint3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z)\n    : x(_x), y(_y), z(_z) {}\n\ntemplate<typename _Tp> inline\nPoint3_<_Tp>::Point3_(const Point3_& pt)\n    : x(pt.x), y(pt.y), z(pt.z) {}\n\ntemplate<typename _Tp> inline\nPoint3_<_Tp>::Point3_(const Point_<_Tp>& pt)\n    : x(pt.x), y(pt.y), z(_Tp()) {}\n\ntemplate<typename _Tp> inline\nPoint3_<_Tp>::Point3_(const Vec<_Tp, 3>& v)\n    : x(v[0]), y(v[1]), z(v[2]) {}\n\ntemplate<typename _Tp> template<typename _Tp2> inline\nPoint3_<_Tp>::operator Point3_<_Tp2>() const\n{\n    return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z));\n}\n\ntemplate<typename _Tp> inline\nPoint3_<_Tp>::operator Vec<_Tp, 3>() const\n{\n    return Vec<_Tp, 3>(x, y, z);\n}\n\ntemplate<typename _Tp> inline\nPoint3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt)\n{\n    x = pt.x; y = pt.y; z = pt.z;\n    return *this;\n}\n\ntemplate<typename _Tp> inline\n_Tp Point3_<_Tp>::dot(const Point3_& pt) const\n{\n    return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z);\n}\n\ntemplate<typename _Tp> inline\ndouble Point3_<_Tp>::ddot(const Point3_& pt) const\n{\n    return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z;\n}\n\ntemplate<typename _Tp> inline\nPoint3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const\n{\n    return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x);\n}\n\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b)\n{\n    a.x += b.x;\n    a.y += b.y;\n    a.z += b.z;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b)\n{\n    a.x -= b.x;\n    a.y -= b.y;\n    a.z -= b.z;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator *= (Point3_<_Tp>& a, int b)\n{\n    a.x = saturate_cast<_Tp>(a.x * b);\n    a.y = saturate_cast<_Tp>(a.y * b);\n    a.z = saturate_cast<_Tp>(a.z * b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator *= (Point3_<_Tp>& a, float b)\n{\n    a.x = saturate_cast<_Tp>(a.x * b);\n    a.y = saturate_cast<_Tp>(a.y * b);\n    a.z = saturate_cast<_Tp>(a.z * b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator *= (Point3_<_Tp>& a, double b)\n{\n    a.x = saturate_cast<_Tp>(a.x * b);\n    a.y = saturate_cast<_Tp>(a.y * b);\n    a.z = saturate_cast<_Tp>(a.z * b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator /= (Point3_<_Tp>& a, int b)\n{\n    a.x = saturate_cast<_Tp>(a.x / b);\n    a.y = saturate_cast<_Tp>(a.y / b);\n    a.z = saturate_cast<_Tp>(a.z / b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator /= (Point3_<_Tp>& a, float b)\n{\n    a.x = saturate_cast<_Tp>(a.x / b);\n    a.y = saturate_cast<_Tp>(a.y / b);\n    a.z = saturate_cast<_Tp>(a.z / b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp>& operator /= (Point3_<_Tp>& a, double b)\n{\n    a.x = saturate_cast<_Tp>(a.x / b);\n    a.y = saturate_cast<_Tp>(a.y / b);\n    a.z = saturate_cast<_Tp>(a.z / b);\n    return a;\n}\n\ntemplate<typename _Tp> static inline\ndouble norm(const Point3_<_Tp>& pt)\n{\n    return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z);\n}\n\ntemplate<typename _Tp> static inline\nbool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b)\n{\n    return a.x == b.x && a.y == b.y && a.z == b.z;\n}\n\ntemplate<typename _Tp> static inline\nbool operator != (const 
Point3_<_Tp>& a, const Point3_<_Tp>& b)\n{\n    return a.x != b.x || a.y != b.y || a.z != b.z;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y), saturate_cast<_Tp>(a.z + b.z));\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y), saturate_cast<_Tp>(a.z - b.z));\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator - (const Point3_<_Tp>& a)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y), saturate_cast<_Tp>(-a.z) );\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (const Point3_<_Tp>& a, int b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b), saturate_cast<_Tp>(a.z*b) );\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (int a, const Point3_<_Tp>& b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(b.x * a), saturate_cast<_Tp>(b.y * a), saturate_cast<_Tp>(b.z * a) );\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (const Point3_<_Tp>& a, float b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(a.x * b), saturate_cast<_Tp>(a.y * b), saturate_cast<_Tp>(a.z * b) );\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (float a, const Point3_<_Tp>& b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(b.x * a), saturate_cast<_Tp>(b.y * a), saturate_cast<_Tp>(b.z * a) );\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (const Point3_<_Tp>& a, double b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(a.x * b), saturate_cast<_Tp>(a.y * b), saturate_cast<_Tp>(a.z * b) );\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (double a, const Point3_<_Tp>& b)\n{\n    return Point3_<_Tp>( saturate_cast<_Tp>(b.x * a), saturate_cast<_Tp>(b.y * a), saturate_cast<_Tp>(b.z * a) );\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b)\n{\n    Matx<_Tp, 3, 1> tmp = a * Vec<_Tp,3>(b.x, b.y, b.z);\n    return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);\n}\n\ntemplate<typename _Tp> static inline\nMatx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b)\n{\n    return a * Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1);\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator / (const Point3_<_Tp>& a, int b)\n{\n    Point3_<_Tp> tmp(a);\n    tmp /= b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator / (const Point3_<_Tp>& a, float b)\n{\n    Point3_<_Tp> tmp(a);\n    tmp /= b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nPoint3_<_Tp> operator / (const Point3_<_Tp>& a, double b)\n{\n    Point3_<_Tp> tmp(a);\n    tmp /= b;\n    return tmp;\n}\n\n\n\n////////////////////////////////// Size /////////////////////////////////\n\ntemplate<typename _Tp> inline\nSize_<_Tp>::Size_()\n    : width(0), height(0) {}\n\ntemplate<typename _Tp> inline\nSize_<_Tp>::Size_(_Tp _width, _Tp _height)\n    : width(_width), height(_height) {}\n\ntemplate<typename _Tp> inline\nSize_<_Tp>::Size_(const Size_& sz)\n    : width(sz.width), height(sz.height) {}\n\ntemplate<typename _Tp> inline\nSize_<_Tp>::Size_(const Point_<_Tp>& pt)\n    : width(pt.x), height(pt.y) {}\n\ntemplate<typename _Tp> template<typename _Tp2> 
inline\nSize_<_Tp>::operator Size_<_Tp2>() const\n{\n    return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height));\n}\n\ntemplate<typename _Tp> inline\nSize_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz)\n{\n    width = sz.width; height = sz.height;\n    return *this;\n}\n\ntemplate<typename _Tp> inline\n_Tp Size_<_Tp>::area() const\n{\n    return width * height;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp>& operator *= (Size_<_Tp>& a, _Tp b)\n{\n    a.width *= b;\n    a.height *= b;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp> operator * (const Size_<_Tp>& a, _Tp b)\n{\n    Size_<_Tp> tmp(a);\n    tmp *= b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp>& operator /= (Size_<_Tp>& a, _Tp b)\n{\n    a.width /= b;\n    a.height /= b;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp> operator / (const Size_<_Tp>& a, _Tp b)\n{\n    Size_<_Tp> tmp(a);\n    tmp /= b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b)\n{\n    a.width += b.width;\n    a.height += b.height;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b)\n{\n    Size_<_Tp> tmp(a);\n    tmp += b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b)\n{\n    a.width -= b.width;\n    a.height -= b.height;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nSize_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b)\n{\n    Size_<_Tp> tmp(a);\n    tmp -= b;\n    return tmp;\n}\n\ntemplate<typename _Tp> static inline\nbool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b)\n{\n    return a.width == b.width && a.height == b.height;\n}\n\ntemplate<typename _Tp> static inline\nbool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b)\n{\n    return !(a == b);\n}\n\n\n\n////////////////////////////////// Rect /////////////////////////////////\n\ntemplate<typename _Tp> inline\nRect_<_Tp>::Rect_()\n    : x(0), y(0), width(0), height(0) {}\n\ntemplate<typename _Tp> inline\nRect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height)\n    : x(_x), y(_y), width(_width), height(_height) {}\n\ntemplate<typename _Tp> inline\nRect_<_Tp>::Rect_(const Rect_<_Tp>& r)\n    : x(r.x), y(r.y), width(r.width), height(r.height) {}\n\ntemplate<typename _Tp> inline\nRect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz)\n    : x(org.x), y(org.y), width(sz.width), height(sz.height) {}\n\ntemplate<typename _Tp> inline\nRect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2)\n{\n    x = std::min(pt1.x, pt2.x);\n    y = std::min(pt1.y, pt2.y);\n    width = std::max(pt1.x, pt2.x) - x;\n    height = std::max(pt1.y, pt2.y) - y;\n}\n\ntemplate<typename _Tp> inline\nRect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r )\n{\n    x = r.x;\n    y = r.y;\n    width = r.width;\n    height = r.height;\n    return *this;\n}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp> Rect_<_Tp>::tl() const\n{\n    return Point_<_Tp>(x,y);\n}\n\ntemplate<typename _Tp> inline\nPoint_<_Tp> Rect_<_Tp>::br() const\n{\n    return Point_<_Tp>(x + width, y + height);\n}\n\ntemplate<typename _Tp> inline\nSize_<_Tp> Rect_<_Tp>::size() const\n{\n    return Size_<_Tp>(width, height);\n}\n\ntemplate<typename _Tp> inline\n_Tp Rect_<_Tp>::area() const\n{\n    return width * height;\n}\n\ntemplate<typename _Tp> template<typename _Tp2> 
inline\nRect_<_Tp>::operator Rect_<_Tp2>() const\n{\n    return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height));\n}\n\ntemplate<typename _Tp> inline\nbool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const\n{\n    return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height;\n}\n\n\ntemplate<typename _Tp> static inline\nRect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b )\n{\n    a.x += b.x;\n    a.y += b.y;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b )\n{\n    a.x -= b.x;\n    a.y -= b.y;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b )\n{\n    a.width += b.width;\n    a.height += b.height;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b )\n{\n    a.width -= b.width;\n    a.height -= b.height;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )\n{\n    _Tp x1 = std::max(a.x, b.x);\n    _Tp y1 = std::max(a.y, b.y);\n    a.width = std::min(a.x + a.width, b.x + b.width) - x1;\n    a.height = std::min(a.y + a.height, b.y + b.height) - y1;\n    a.x = x1;\n    a.y = y1;\n    if( a.width <= 0 || a.height <= 0 )\n        a = Rect();\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )\n{\n    _Tp x1 = std::min(a.x, b.x);\n    _Tp y1 = std::min(a.y, b.y);\n    a.width = std::max(a.x + a.width, b.x + b.width) - x1;\n    a.height = std::max(a.y + a.height, b.y + b.height) - y1;\n    a.x = x1;\n    a.y = y1;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nbool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b)\n{\n    return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height;\n}\n\ntemplate<typename _Tp> static inline\nbool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b)\n{\n    return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height;\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b)\n{\n    return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height );\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b)\n{\n    return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height );\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b)\n{\n    return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height );\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b)\n{\n    Rect_<_Tp> c = a;\n    return c &= b;\n}\n\ntemplate<typename _Tp> static inline\nRect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b)\n{\n    Rect_<_Tp> c = a;\n    return c |= b;\n}\n\n\n\n////////////////////////////// RotatedRect //////////////////////////////\n\ninline\nRotatedRect::RotatedRect()\n    : center(), size(), angle(0) {}\n\ninline\nRotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle)\n    : center(_center), size(_size), angle(_angle) {}\n\n\n\n///////////////////////////////// Range /////////////////////////////////\n\ninline\nRange::Range()\n    : start(0), end(0) {}\n\ninline\nRange::Range(int _start, int _end)\n    : start(_start), end(_end) 
{}\n\ninline\nint Range::size() const\n{\n    return end - start;\n}\n\ninline\nbool Range::empty() const\n{\n    return start == end;\n}\n\ninline\nRange Range::all()\n{\n    return Range(INT_MIN, INT_MAX);\n}\n\n\nstatic inline\nbool operator == (const Range& r1, const Range& r2)\n{\n    return r1.start == r2.start && r1.end == r2.end;\n}\n\nstatic inline\nbool operator != (const Range& r1, const Range& r2)\n{\n    return !(r1 == r2);\n}\n\nstatic inline\nbool operator !(const Range& r)\n{\n    return r.start == r.end;\n}\n\nstatic inline\nRange operator & (const Range& r1, const Range& r2)\n{\n    Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end));\n    r.end = std::max(r.end, r.start);\n    return r;\n}\n\nstatic inline\nRange& operator &= (Range& r1, const Range& r2)\n{\n    r1 = r1 & r2;\n    return r1;\n}\n\nstatic inline\nRange operator + (const Range& r1, int delta)\n{\n    return Range(r1.start + delta, r1.end + delta);\n}\n\nstatic inline\nRange operator + (int delta, const Range& r1)\n{\n    return Range(r1.start + delta, r1.end + delta);\n}\n\nstatic inline\nRange operator - (const Range& r1, int delta)\n{\n    return r1 + (-delta);\n}\n\n\n\n///////////////////////////////// Scalar ////////////////////////////////\n\ntemplate<typename _Tp> inline\nScalar_<_Tp>::Scalar_()\n{\n    this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0;\n}\n\ntemplate<typename _Tp> inline\nScalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3)\n{\n    this->val[0] = v0;\n    this->val[1] = v1;\n    this->val[2] = v2;\n    this->val[3] = v3;\n}\n\ntemplate<typename _Tp> template<typename _Tp2, int cn> inline\nScalar_<_Tp>::Scalar_(const Vec<_Tp2, cn>& v)\n{\n    int i;\n    for( i = 0; i < (cn < 4 ? cn : 4); i++ )\n        this->val[i] = cv::saturate_cast<_Tp>(v.val[i]);\n    for( ; i < 4; i++ )\n        this->val[i] = 0;\n}\n\ntemplate<typename _Tp> inline\nScalar_<_Tp>::Scalar_(_Tp v0)\n{\n    this->val[0] = v0;\n    this->val[1] = this->val[2] = this->val[3] = 0;\n}\n\ntemplate<typename _Tp> inline\nScalar_<_Tp> Scalar_<_Tp>::all(_Tp v0)\n{\n    return Scalar_<_Tp>(v0, v0, v0, v0);\n}\n\n\ntemplate<typename _Tp> inline\nScalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& a, double scale ) const\n{\n    return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0] * a.val[0] * scale),\n                        saturate_cast<_Tp>(this->val[1] * a.val[1] * scale),\n                        saturate_cast<_Tp>(this->val[2] * a.val[2] * scale),\n                        saturate_cast<_Tp>(this->val[3] * a.val[3] * scale));\n}\n\ntemplate<typename _Tp> inline\nScalar_<_Tp> Scalar_<_Tp>::conj() const\n{\n    return Scalar_<_Tp>(saturate_cast<_Tp>( this->val[0]),\n                        saturate_cast<_Tp>(-this->val[1]),\n                        saturate_cast<_Tp>(-this->val[2]),\n                        saturate_cast<_Tp>(-this->val[3]));\n}\n\ntemplate<typename _Tp> inline\nbool Scalar_<_Tp>::isReal() const\n{\n    return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0;\n}\n\n\ntemplate<typename _Tp> template<typename T2> inline\nScalar_<_Tp>::operator Scalar_<T2>() const\n{\n    return Scalar_<T2>(saturate_cast<T2>(this->val[0]),\n                       saturate_cast<T2>(this->val[1]),\n                       saturate_cast<T2>(this->val[2]),\n                       saturate_cast<T2>(this->val[3]));\n}\n\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    a.val[0] += b.val[0];\n    a.val[1] += b.val[1];\n    a.val[2] += 
b.val[2];\n    a.val[3] += b.val[3];\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    a.val[0] -= b.val[0];\n    a.val[1] -= b.val[1];\n    a.val[2] -= b.val[2];\n    a.val[3] -= b.val[3];\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v )\n{\n    a.val[0] *= v;\n    a.val[1] *= v;\n    a.val[2] *= v;\n    a.val[3] *= v;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nbool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b )\n{\n    return a.val[0] == b.val[0] && a.val[1] == b.val[1] &&\n           a.val[2] == b.val[2] && a.val[3] == b.val[3];\n}\n\ntemplate<typename _Tp> static inline\nbool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b )\n{\n    return a.val[0] != b.val[0] || a.val[1] != b.val[1] ||\n           a.val[2] != b.val[2] || a.val[3] != b.val[3];\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    return Scalar_<_Tp>(a.val[0] + b.val[0],\n                        a.val[1] + b.val[1],\n                        a.val[2] + b.val[2],\n                        a.val[3] + b.val[3]);\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]),\n                        saturate_cast<_Tp>(a.val[1] - b.val[1]),\n                        saturate_cast<_Tp>(a.val[2] - b.val[2]),\n                        saturate_cast<_Tp>(a.val[3] - b.val[3]));\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha)\n{\n    return Scalar_<_Tp>(a.val[0] * alpha,\n                        a.val[1] * alpha,\n                        a.val[2] * alpha,\n                        a.val[3] * alpha);\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a)\n{\n    return a*alpha;\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator - (const Scalar_<_Tp>& a)\n{\n    return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]),\n                        saturate_cast<_Tp>(-a.val[1]),\n                        saturate_cast<_Tp>(-a.val[2]),\n                        saturate_cast<_Tp>(-a.val[3]));\n}\n\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]),\n                        saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2]),\n                        saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + a[3]*b[1]),\n                        saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0]));\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp>& operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    a = a * b;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha)\n{\n    return Scalar_<_Tp>(a.val[0] / alpha,\n                        a.val[1] / alpha,\n                        a.val[2] / alpha,\n                        a.val[3] / alpha);\n}\n\ntemplate<typename _Tp> static inline\nScalar_<float> operator / (const Scalar_<float>& a, float alpha)\n{\n    float s = 1 / alpha;\n    return Scalar_<float>(a.val[0] * s, a.val[1] * s, a.val[2] * s, a.val[3] * s);\n}\n\ntemplate<typename _Tp> static inline\nScalar_<double> 
operator / (const Scalar_<double>& a, double alpha)\n{\n    double s = 1 / alpha;\n    return Scalar_<double>(a.val[0] * s, a.val[1] * s, a.val[2] * s, a.val[3] * s);\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha)\n{\n    a = a / alpha;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b)\n{\n    _Tp s = a / (b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]);\n    return b.conj() * s;\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    return a * ((_Tp)1 / b);\n}\n\ntemplate<typename _Tp> static inline\nScalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)\n{\n    a = a / b;\n    return a;\n}\n\ntemplate<typename _Tp> static inline\nScalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b)\n{\n    Matx<double, 4, 1> c((Matx<double, 4, 4>)a, b, Matx_MatMulOp());\n    return reinterpret_cast<const Scalar&>(c);\n}\n\ntemplate<> inline\nScalar operator * (const Matx<double, 4, 4>& a, const Scalar& b)\n{\n    Matx<double, 4, 1> c(a, b, Matx_MatMulOp());\n    return reinterpret_cast<const Scalar&>(c);\n}\n\n\n\n//////////////////////////////// KeyPoint ///////////////////////////////\n\ninline\nKeyPoint::KeyPoint()\n    : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {}\n\ninline\nKeyPoint::KeyPoint(Point2f _pt, float _size, float _angle, float _response, int _octave, int _class_id)\n    : pt(_pt), size(_size), angle(_angle), response(_response), octave(_octave), class_id(_class_id) {}\n\ninline\nKeyPoint::KeyPoint(float x, float y, float _size, float _angle, float _response, int _octave, int _class_id)\n    : pt(x, y), size(_size), angle(_angle), response(_response), octave(_octave), class_id(_class_id) {}\n\n\n\n///////////////////////////////// DMatch ////////////////////////////////\n\ninline\nDMatch::DMatch()\n    : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {}\n\ninline\nDMatch::DMatch(int _queryIdx, int _trainIdx, float _distance)\n    : queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}\n\ninline\nDMatch::DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance)\n    : queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}\n\ninline\nbool DMatch::operator < (const DMatch &m) const\n{\n    return distance < m.distance;\n}\n\n\n\n////////////////////////////// TermCriteria /////////////////////////////\n\ninline\nTermCriteria::TermCriteria()\n    : type(0), maxCount(0), epsilon(0) {}\n\ninline\nTermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon)\n    : type(_type), maxCount(_maxCount), epsilon(_epsilon) {}\n\n//! @endcond\n\n} // cv\n\n#endif //__OPENCV_CORE_TYPES_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/types_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_TYPES_H__\n#define __OPENCV_CORE_TYPES_H__\n\n#ifdef HAVE_IPL\n#  ifndef __IPL_H__\n#    if defined WIN32 || defined _WIN32\n#      include <ipl.h>\n#    else\n#      include <ipl/ipl.h>\n#    endif\n#  endif\n#elif defined __IPL_H__\n#  define HAVE_IPL\n#endif\n\n#include \"opencv2/core/cvdef.h\"\n\n#ifndef SKIP_INCLUDES\n#include <assert.h>\n#include <stdlib.h>\n#include <string.h>\n#include <float.h>\n#endif // SKIP_INCLUDES\n\n#if defined WIN32 || defined _WIN32\n#  define CV_CDECL __cdecl\n#  define CV_STDCALL __stdcall\n#else\n#  define CV_CDECL\n#  define CV_STDCALL\n#endif\n\n#ifndef CV_DEFAULT\n#  ifdef __cplusplus\n#    define CV_DEFAULT(val) = val\n#  else\n#    define CV_DEFAULT(val)\n#  endif\n#endif\n\n#ifndef CV_EXTERN_C_FUNCPTR\n#  ifdef __cplusplus\n#    define CV_EXTERN_C_FUNCPTR(x) extern \"C\" { typedef x; }\n#  else\n#    define CV_EXTERN_C_FUNCPTR(x) typedef x\n#  endif\n#endif\n\n#ifndef CVAPI\n#  define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL\n#endif\n\n#ifndef CV_IMPL\n#  define CV_IMPL CV_EXTERN_C\n#endif\n\n#ifdef __cplusplus\n#  include \"opencv2/core.hpp\"\n#endif\n\n/** @addtogroup core_c\n    @{\n*/\n\n/** @brief This is the \"metatype\" used *only* as a function parameter.\n\nIt denotes 
that the function accepts arrays of multiple types, such as IplImage*, CvMat* or even\nCvSeq* sometimes. The particular array type is determined at runtime by analyzing the first 4\nbytes of the header. In C++ interface the role of CvArr is played by InputArray and OutputArray.\n */\ntypedef void CvArr;\n\ntypedef int CVStatus;\n\n/** @see cv::Error::Code */\nenum {\n CV_StsOk=                       0,  /**< everything is ok                */\n CV_StsBackTrace=               -1,  /**< pseudo error for back trace     */\n CV_StsError=                   -2,  /**< unknown /unspecified error      */\n CV_StsInternal=                -3,  /**< internal error (bad state)      */\n CV_StsNoMem=                   -4,  /**< insufficient memory             */\n CV_StsBadArg=                  -5,  /**< function arg/param is bad       */\n CV_StsBadFunc=                 -6,  /**< unsupported function            */\n CV_StsNoConv=                  -7,  /**< iter. didn't converge           */\n CV_StsAutoTrace=               -8,  /**< tracing                         */\n CV_HeaderIsNull=               -9,  /**< image header is NULL            */\n CV_BadImageSize=              -10,  /**< image size is invalid           */\n CV_BadOffset=                 -11,  /**< offset is invalid               */\n CV_BadDataPtr=                -12,  /**/\n CV_BadStep=                   -13,  /**/\n CV_BadModelOrChSeq=           -14,  /**/\n CV_BadNumChannels=            -15,  /**/\n CV_BadNumChannel1U=           -16,  /**/\n CV_BadDepth=                  -17,  /**/\n CV_BadAlphaChannel=           -18,  /**/\n CV_BadOrder=                  -19,  /**/\n CV_BadOrigin=                 -20,  /**/\n CV_BadAlign=                  -21,  /**/\n CV_BadCallBack=               -22,  /**/\n CV_BadTileSize=               -23,  /**/\n CV_BadCOI=                    -24,  /**/\n CV_BadROISize=                -25,  /**/\n CV_MaskIsTiled=               -26,  /**/\n CV_StsNullPtr=                -27,  /**< null pointer */\n CV_StsVecLengthErr=           -28,  /**< incorrect vector length */\n CV_StsFilterStructContentErr= -29,  /**< incorr. filter structure content */\n CV_StsKernelStructContentErr= -30,  /**< incorr. 
transform kernel content */\n CV_StsFilterOffsetErr=        -31,  /**< incorrect filter offset value */\n CV_StsBadSize=                -201, /**< the input/output structure size is incorrect  */\n CV_StsDivByZero=              -202, /**< division by zero */\n CV_StsInplaceNotSupported=    -203, /**< in-place operation is not supported */\n CV_StsObjectNotFound=         -204, /**< request can't be completed */\n CV_StsUnmatchedFormats=       -205, /**< formats of input/output arrays differ */\n CV_StsBadFlag=                -206, /**< flag is wrong or not supported */\n CV_StsBadPoint=               -207, /**< bad CvPoint */\n CV_StsBadMask=                -208, /**< bad format of mask (neither 8uC1 nor 8sC1)*/\n CV_StsUnmatchedSizes=         -209, /**< sizes of input/output structures do not match */\n CV_StsUnsupportedFormat=      -210, /**< the data format/type is not supported by the function*/\n CV_StsOutOfRange=             -211, /**< some of parameters are out of range */\n CV_StsParseError=             -212, /**< invalid syntax/structure of the parsed file */\n CV_StsNotImplemented=         -213, /**< the requested function/feature is not implemented */\n CV_StsBadMemBlock=            -214, /**< an allocated block has been corrupted */\n CV_StsAssert=                 -215, /**< assertion failed */\n CV_GpuNotSupported=           -216,\n CV_GpuApiCallError=           -217,\n CV_OpenGlNotSupported=        -218,\n CV_OpenGlApiCallError=        -219,\n CV_OpenCLApiCallError=        -220,\n CV_OpenCLDoubleNotSupported=  -221,\n CV_OpenCLInitError=           -222,\n CV_OpenCLNoAMDBlasFft=        -223\n};\n\n/****************************************************************************************\\\n*                             Common macros and inline functions                         *\n\\****************************************************************************************/\n\n#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t))\n\n/** min & max without jumps */\n#define  CV_IMIN(a, b)  ((a) ^ (((a)^(b)) & (((a) < (b)) - 1)))\n\n#define  CV_IMAX(a, b)  ((a) ^ (((a)^(b)) & (((a) > (b)) - 1)))\n\n/** absolute value without jumps */\n#ifndef __cplusplus\n#  define  CV_IABS(a)     (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))\n#else\n#  define  CV_IABS(a)     abs(a)\n#endif\n#define  CV_CMP(a,b)    (((a) > (b)) - ((a) < (b)))\n#define  CV_SIGN(a)     CV_CMP((a),0)\n\n#define cvInvSqrt(value) ((float)(1./sqrt(value)))\n#define cvSqrt(value)  ((float)sqrt(value))\n\n\n/*************** Random number generation *******************/\n\ntypedef uint64 CvRNG;\n\n#define CV_RNG_COEFF 4164903690U\n\n/** @brief Initializes a random number generator state.\n\nThe function initializes a random number generator and returns the state. The pointer to the state\ncan be then passed to the cvRandInt, cvRandReal and cvRandArr functions. In the current\nimplementation a multiply-with-carry generator is used.\n@param seed 64-bit value used to initiate a random sequence\n@sa the C++ class RNG replaced CvRNG.\n */\nCV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1))\n{\n    CvRNG rng = seed ? (uint64)seed : (uint64)(int64)-1;\n    return rng;\n}\n\n/** @brief Returns a 32-bit unsigned integer and updates RNG.\n\nThe function returns a uniformly-distributed random 32-bit unsigned integer and updates the RNG\nstate. 
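For example, one might seed a generator once and then draw values from it:\n\n    CvRNG rng = cvRNG(-1);\n    unsigned r = cvRandInt(&rng); // uniformly-distributed 32-bit value\n\n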
It is similar to the rand() function from the C runtime library, except that OpenCV functions\nalways generate a 32-bit random number, regardless of the platform.\n@param rng CvRNG state initialized by cvRNG.\n */\nCV_INLINE unsigned cvRandInt( CvRNG* rng )\n{\n    uint64 temp = *rng;\n    temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32);\n    *rng = temp;\n    return (unsigned)temp;\n}\n\n/** @brief Returns a floating-point random number and updates RNG.\n\nThe function returns a uniformly-distributed random floating-point number between 0 and 1 (1 is not\nincluded).\n@param rng RNG state initialized by cvRNG\n */\nCV_INLINE double cvRandReal( CvRNG* rng )\n{\n    return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */;\n}\n\n/****************************************************************************************\\\n*                                  Image type (IplImage)                                 *\n\\****************************************************************************************/\n\n#ifndef HAVE_IPL\n\n/*\n * The following definitions (until #endif)\n * are an extract from IPL headers.\n * Copyright (c) 1995 Intel Corporation.\n */\n#define IPL_DEPTH_SIGN 0x80000000\n\n#define IPL_DEPTH_1U     1\n#define IPL_DEPTH_8U     8\n#define IPL_DEPTH_16U   16\n#define IPL_DEPTH_32F   32\n\n#define IPL_DEPTH_8S  (IPL_DEPTH_SIGN| 8)\n#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16)\n#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32)\n\n#define IPL_DATA_ORDER_PIXEL  0\n#define IPL_DATA_ORDER_PLANE  1\n\n#define IPL_ORIGIN_TL 0\n#define IPL_ORIGIN_BL 1\n\n#define IPL_ALIGN_4BYTES   4\n#define IPL_ALIGN_8BYTES   8\n#define IPL_ALIGN_16BYTES 16\n#define IPL_ALIGN_32BYTES 32\n\n#define IPL_ALIGN_DWORD   IPL_ALIGN_4BYTES\n#define IPL_ALIGN_QWORD   IPL_ALIGN_8BYTES\n\n#define IPL_BORDER_CONSTANT   0\n#define IPL_BORDER_REPLICATE  1\n#define IPL_BORDER_REFLECT    2\n#define IPL_BORDER_WRAP       3\n\n/** The IplImage is taken from the Intel Image Processing Library, in which the format is native. OpenCV\nonly supports a subset of possible IplImage formats, as outlined in the parameter list above.\n\nIn addition to the above restrictions, OpenCV handles ROIs differently. OpenCV functions require\nthat the image size or ROI size of all source and destination images match exactly. On the other\nhand, the Intel Image Processing Library processes the area of intersection between the source and\ndestination images (or ROIs), allowing them to vary independently.\n*/\ntypedef struct\n#ifdef __cplusplus\n  CV_EXPORTS\n#endif\n_IplImage\n{\n    int  nSize;             /**< sizeof(IplImage) */\n    int  ID;                /**< version (=0)*/\n    int  nChannels;         /**< Most OpenCV functions support 1,2,3 or 4 channels */\n    int  alphaChannel;      /**< Ignored by OpenCV */\n    int  depth;             /**< Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S,\n                               IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported.  */\n    char colorModel[4];     /**< Ignored by OpenCV */\n    char channelSeq[4];     /**< ditto */\n    int  dataOrder;         /**< 0 - interleaved color channels, 1 - separate color channels.\n                               cvCreateImage can only create interleaved images */\n    int  origin;            /**< 0 - top-left origin,\n                               1 - bottom-left origin (Windows bitmaps style).  
*/\n    int  align;             /**< Alignment of image rows (4 or 8).\n                               OpenCV ignores it and uses widthStep instead.    */\n    int  width;             /**< Image width in pixels.                           */\n    int  height;            /**< Image height in pixels.                          */\n    struct _IplROI *roi;    /**< Image ROI. If NULL, the whole image is selected. */\n    struct _IplImage *maskROI;      /**< Must be NULL. */\n    void  *imageId;                 /**< \"           \" */\n    struct _IplTileInfo *tileInfo;  /**< \"           \" */\n    int  imageSize;         /**< Image data size in bytes\n                               (==image->height*image->widthStep\n                               in case of interleaved data)*/\n    char *imageData;        /**< Pointer to aligned image data.         */\n    int  widthStep;         /**< Size of aligned image row in bytes.    */\n    int  BorderMode[4];     /**< Ignored by OpenCV.                     */\n    int  BorderConst[4];    /**< Ditto.                                 */\n    char *imageDataOrigin;  /**< Pointer to very origin of image data\n                               (not necessarily aligned) -\n                               needed for correct deallocation */\n\n#ifdef __cplusplus\n    _IplImage() {}\n    _IplImage(const cv::Mat& m);\n#endif\n}\nIplImage;\n\ntypedef struct _IplTileInfo IplTileInfo;\n\ntypedef struct _IplROI\n{\n    int  coi; /**< 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/\n    int  xOffset;\n    int  yOffset;\n    int  width;\n    int  height;\n}\nIplROI;\n\ntypedef struct _IplConvKernel\n{\n    int  nCols;\n    int  nRows;\n    int  anchorX;\n    int  anchorY;\n    int *values;\n    int  nShiftR;\n}\nIplConvKernel;\n\ntypedef struct _IplConvKernelFP\n{\n    int  nCols;\n    int  nRows;\n    int  anchorX;\n    int  anchorY;\n    float *values;\n}\nIplConvKernelFP;\n\n#define IPL_IMAGE_HEADER 1\n#define IPL_IMAGE_DATA   2\n#define IPL_IMAGE_ROI    4\n\n#endif/*HAVE_IPL*/\n\n/** extra border mode */\n#define IPL_BORDER_REFLECT_101    4\n#define IPL_BORDER_TRANSPARENT    5\n\n#define IPL_IMAGE_MAGIC_VAL  ((int)sizeof(IplImage))\n#define CV_TYPE_NAME_IMAGE \"opencv-image\"\n\n#define CV_IS_IMAGE_HDR(img) \\\n    ((img) != NULL && ((const IplImage*)(img))->nSize == sizeof(IplImage))\n\n#define CV_IS_IMAGE(img) \\\n    (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL)\n\n/** for storing double-precision\n   floating point data in IplImage's */\n#define IPL_DEPTH_64F  64\n\n/** get reference to pixel at (col,row),\n   for multi-channel images (col) should be multiplied by number of channels */\n#define CV_IMAGE_ELEM( image, elemtype, row, col )       \\\n    (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)])\n\n/****************************************************************************************\\\n*                                  Matrix type (CvMat)                                   *\n\\****************************************************************************************/\n\n#define CV_AUTO_STEP  0x7fffffff\n#define CV_WHOLE_ARR  cvSlice( 0, 0x3fffffff )\n\n#define CV_MAGIC_MASK       0xFFFF0000\n#define CV_MAT_MAGIC_VAL    0x42420000\n#define CV_TYPE_NAME_MAT    \"opencv-matrix\"\n\n/** Matrix elements are stored row by row. 
Element (i, j) (i - 0-based row index, j - 0-based column\nindex) of a matrix can be retrieved or modified using CV_MAT_ELEM macro:\n\n    uchar pixval = CV_MAT_ELEM(grayimg, uchar, i, j)\n    CV_MAT_ELEM(cameraMatrix, float, 0, 2) = image.width*0.5f;\n\nTo access multiple-channel matrices, you can use\nCV_MAT_ELEM(matrix, type, i, j\\*nchannels + channel_idx).\n\n@deprecated CvMat is now obsolete; consider using Mat instead.\n */\ntypedef struct CvMat\n{\n    int type;\n    int step;\n\n    /* for internal use only */\n    int* refcount;\n    int hdr_refcount;\n\n    union\n    {\n        uchar* ptr;\n        short* s;\n        int* i;\n        float* fl;\n        double* db;\n    } data;\n\n#ifdef __cplusplus\n    union\n    {\n        int rows;\n        int height;\n    };\n\n    union\n    {\n        int cols;\n        int width;\n    };\n#else\n    int rows;\n    int cols;\n#endif\n\n\n#ifdef __cplusplus\n    CvMat() {}\n    CvMat(const CvMat& m) { memcpy(this, &m, sizeof(CvMat));}\n    CvMat(const cv::Mat& m);\n#endif\n\n}\nCvMat;\n\n\n#define CV_IS_MAT_HDR(mat) \\\n    ((mat) != NULL && \\\n    (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \\\n    ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0)\n\n#define CV_IS_MAT_HDR_Z(mat) \\\n    ((mat) != NULL && \\\n    (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \\\n    ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0)\n\n#define CV_IS_MAT(mat) \\\n    (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL)\n\n#define CV_IS_MASK_ARR(mat) \\\n    (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0)\n\n#define CV_ARE_TYPES_EQ(mat1, mat2) \\\n    ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0)\n\n#define CV_ARE_CNS_EQ(mat1, mat2) \\\n    ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0)\n\n#define CV_ARE_DEPTHS_EQ(mat1, mat2) \\\n    ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0)\n\n#define CV_ARE_SIZES_EQ(mat1, mat2) \\\n    ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols)\n\n#define CV_IS_MAT_CONST(mat)  \\\n    (((mat)->rows|(mat)->cols) == 1)\n\n#define IPL2CV_DEPTH(depth) \\\n    ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \\\n    (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \\\n    (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15)\n\n/** Inline constructor. No data is allocated internally!!!\n * (Use together with cvCreateData, or use cvCreateMat instead to\n * get a matrix with allocated data):\n */\nCV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL))\n{\n    CvMat m;\n\n    assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F );\n    type = CV_MAT_TYPE(type);\n    m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type;\n    m.cols = cols;\n    m.rows = rows;\n    m.step = m.cols*CV_ELEM_SIZE(type);\n    m.data.ptr = (uchar*)data;\n    m.refcount = NULL;\n    m.hdr_refcount = 0;\n\n    return m;\n}\n\n#ifdef __cplusplus\ninline CvMat::CvMat(const cv::Mat& m)\n{\n    CV_DbgAssert(m.dims <= 2);\n    *this = cvMat(m.rows, m.dims == 1 ? 
1 : m.cols, m.type(), m.data);\n    step = (int)m.step[0];\n    type = (type & ~cv::Mat::CONTINUOUS_FLAG) | (m.flags & cv::Mat::CONTINUOUS_FLAG);\n}\n#endif\n\n\n#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size )  \\\n    (assert( (unsigned)(row) < (unsigned)(mat).rows &&   \\\n             (unsigned)(col) < (unsigned)(mat).cols ),   \\\n     (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col))\n\n#define CV_MAT_ELEM_PTR( mat, row, col )                 \\\n    CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) )\n\n#define CV_MAT_ELEM( mat, elemtype, row, col )           \\\n    (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype)))\n\n/** @brief Returns the particular element of single-channel floating-point matrix.\n\nThe function is a fast replacement for cvGetReal2D in the case of single-channel floating-point\nmatrices. It is faster because it is inline, it does fewer checks for array type and array element\ntype, and it checks for the row and column ranges only in debug mode.\n@param mat Input matrix\n@param row The zero-based index of row\n@param col The zero-based index of column\n */\nCV_INLINE  double  cvmGet( const CvMat* mat, int row, int col )\n{\n    int type;\n\n    type = CV_MAT_TYPE(mat->type);\n    assert( (unsigned)row < (unsigned)mat->rows &&\n            (unsigned)col < (unsigned)mat->cols );\n\n    if( type == CV_32FC1 )\n        return ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col];\n    else\n    {\n        assert( type == CV_64FC1 );\n        return ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col];\n    }\n}\n\n/** @brief Sets a specific element of a single-channel floating-point matrix.\n\nThe function is a fast replacement for cvSetReal2D in the case of single-channel floating-point\nmatrices. It is faster because it is inline, it does fewer checks for array type and array element\ntype, and it checks for the row and column ranges only in debug mode.\n@param mat The matrix\n@param row The zero-based index of row\n@param col The zero-based index of column\n@param value The new value of the matrix element\n */\nCV_INLINE  void  cvmSet( CvMat* mat, int row, int col, double value )\n{\n    int type;\n    type = CV_MAT_TYPE(mat->type);\n    assert( (unsigned)row < (unsigned)mat->rows &&\n            (unsigned)col < (unsigned)mat->cols );\n\n    if( type == CV_32FC1 )\n        ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value;\n    else\n    {\n        assert( type == CV_64FC1 );\n        ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = value;\n    }\n}\n\n\nCV_INLINE int cvIplDepth( int type )\n{\n    int depth = CV_MAT_DEPTH(type);\n    return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S ||\n           depth == CV_32S ? 
IPL_DEPTH_SIGN : 0);\n}\n\n\n/****************************************************************************************\\\n*                       Multi-dimensional dense array (CvMatND)                          *\n\\****************************************************************************************/\n\n#define CV_MATND_MAGIC_VAL    0x42430000\n#define CV_TYPE_NAME_MATND    \"opencv-nd-matrix\"\n\n#define CV_MAX_DIM            32\n#define CV_MAX_DIM_HEAP       1024\n\n/**\n  @deprecated consider using cv::Mat instead\n  */\ntypedef struct\n#ifdef __cplusplus\n  CV_EXPORTS\n#endif\nCvMatND\n{\n    int type;\n    int dims;\n\n    int* refcount;\n    int hdr_refcount;\n\n    union\n    {\n        uchar* ptr;\n        float* fl;\n        double* db;\n        int* i;\n        short* s;\n    } data;\n\n    struct\n    {\n        int size;\n        int step;\n    }\n    dim[CV_MAX_DIM];\n\n#ifdef __cplusplus\n    CvMatND() {}\n    CvMatND(const cv::Mat& m);\n#endif\n}\nCvMatND;\n\n#define CV_IS_MATND_HDR(mat) \\\n    ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL)\n\n#define CV_IS_MATND(mat) \\\n    (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL)\n\n\n/****************************************************************************************\\\n*                      Multi-dimensional sparse array (CvSparseMat)                      *\n\\****************************************************************************************/\n\n#define CV_SPARSE_MAT_MAGIC_VAL    0x42440000\n#define CV_TYPE_NAME_SPARSE_MAT    \"opencv-sparse-matrix\"\n\nstruct CvSet;\n\ntypedef struct\n#ifdef __cplusplus\n  CV_EXPORTS\n#endif\nCvSparseMat\n{\n    int type;\n    int dims;\n    int* refcount;\n    int hdr_refcount;\n\n    struct CvSet* heap;\n    void** hashtable;\n    int hashsize;\n    int valoffset;\n    int idxoffset;\n    int size[CV_MAX_DIM];\n\n#ifdef __cplusplus\n    void copyToSparseMat(cv::SparseMat& m) const;\n#endif\n}\nCvSparseMat;\n\n#ifdef __cplusplus\n    CV_EXPORTS CvSparseMat* cvCreateSparseMat(const cv::SparseMat& m);\n#endif\n\n#define CV_IS_SPARSE_MAT_HDR(mat) \\\n    ((mat) != NULL && \\\n    (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL)\n\n#define CV_IS_SPARSE_MAT(mat) \\\n    CV_IS_SPARSE_MAT_HDR(mat)\n\n/**************** iteration through a sparse array *****************/\n\ntypedef struct CvSparseNode\n{\n    unsigned hashval;\n    struct CvSparseNode* next;\n}\nCvSparseNode;\n\ntypedef struct CvSparseMatIterator\n{\n    CvSparseMat* mat;\n    CvSparseNode* node;\n    int curidx;\n}\nCvSparseMatIterator;\n\n#define CV_NODE_VAL(mat,node)   ((void*)((uchar*)(node) + (mat)->valoffset))\n#define CV_NODE_IDX(mat,node)   ((int*)((uchar*)(node) + (mat)->idxoffset))\n\n/****************************************************************************************\\\n*                                         Histogram                                      *\n\\****************************************************************************************/\n\ntypedef int CvHistType;\n\n#define CV_HIST_MAGIC_VAL     0x42450000\n#define CV_HIST_UNIFORM_FLAG  (1 << 10)\n\n/** indicates whether bin ranges are set already or not */\n#define CV_HIST_RANGES_FLAG   (1 << 11)\n\n#define CV_HIST_ARRAY         0\n#define CV_HIST_SPARSE        1\n#define CV_HIST_TREE          CV_HIST_SPARSE\n\n/** should be used as a parameter only,\n   it turns to CV_HIST_UNIFORM_FLAG of hist->type */\n#define CV_HIST_UNIFORM       1\n\ntypedef 
struct CvHistogram\n{\n    int     type;\n    CvArr*  bins;\n    float   thresh[CV_MAX_DIM][2];  /**< For uniform histograms.                      */\n    float** thresh2;                /**< For non-uniform histograms.                  */\n    CvMatND mat;                    /**< Embedded matrix header for array histograms. */\n}\nCvHistogram;\n\n#define CV_IS_HIST( hist ) \\\n    ((hist) != NULL  && \\\n     (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \\\n     (hist)->bins != NULL)\n\n#define CV_IS_UNIFORM_HIST( hist ) \\\n    (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0)\n\n#define CV_IS_SPARSE_HIST( hist ) \\\n    CV_IS_SPARSE_MAT((hist)->bins)\n\n#define CV_HIST_HAS_RANGES( hist ) \\\n    (((hist)->type & CV_HIST_RANGES_FLAG) != 0)\n\n/****************************************************************************************\\\n*                      Other supplementary data type definitions                         *\n\\****************************************************************************************/\n\n/*************************************** CvRect *****************************************/\n/** @sa Rect_ */\ntypedef struct CvRect\n{\n    int x;\n    int y;\n    int width;\n    int height;\n\n#ifdef __cplusplus\n    CvRect(int _x = 0, int _y = 0, int w = 0, int h = 0): x(_x), y(_y), width(w), height(h) {}\n    template<typename _Tp>\n    CvRect(const cv::Rect_<_Tp>& r): x(cv::saturate_cast<int>(r.x)), y(cv::saturate_cast<int>(r.y)), width(cv::saturate_cast<int>(r.width)), height(cv::saturate_cast<int>(r.height)) {}\n    template<typename _Tp>\n    operator cv::Rect_<_Tp>() const { return cv::Rect_<_Tp>((_Tp)x, (_Tp)y, (_Tp)width, (_Tp)height); }\n#endif\n}\nCvRect;\n\n/** constructs CvRect structure. */\nCV_INLINE  CvRect  cvRect( int x, int y, int width, int height )\n{\n    CvRect r;\n\n    r.x = x;\n    r.y = y;\n    r.width = width;\n    r.height = height;\n\n    return r;\n}\n\n\nCV_INLINE  IplROI  cvRectToROI( CvRect rect, int coi )\n{\n    IplROI roi;\n    roi.xOffset = rect.x;\n    roi.yOffset = rect.y;\n    roi.width = rect.width;\n    roi.height = rect.height;\n    roi.coi = coi;\n\n    return roi;\n}\n\n\nCV_INLINE  CvRect  cvROIToRect( IplROI roi )\n{\n    return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height );\n}\n\n/*********************************** CvTermCriteria *************************************/\n\n#define CV_TERMCRIT_ITER    1\n#define CV_TERMCRIT_NUMBER  CV_TERMCRIT_ITER\n#define CV_TERMCRIT_EPS     2\n\n/** @sa TermCriteria\n */\ntypedef struct CvTermCriteria\n{\n    int    type;  /**< may be combination of\n                     CV_TERMCRIT_ITER\n                     CV_TERMCRIT_EPS */\n    int    max_iter;\n    double epsilon;\n\n#ifdef __cplusplus\n    CvTermCriteria(int _type = 0, int _iter = 0, double _eps = 0) : type(_type), max_iter(_iter), epsilon(_eps)  {}\n    CvTermCriteria(const cv::TermCriteria& t) : type(t.type), max_iter(t.maxCount), epsilon(t.epsilon)  {}\n    operator cv::TermCriteria() const { return cv::TermCriteria(type, max_iter, epsilon); }\n#endif\n\n}\nCvTermCriteria;\n\nCV_INLINE  CvTermCriteria  cvTermCriteria( int type, int max_iter, double epsilon )\n{\n    CvTermCriteria t;\n\n    t.type = type;\n    t.max_iter = max_iter;\n    t.epsilon = (float)epsilon;\n\n    return t;\n}\n\n\n/******************************* CvPoint and variants ***********************************/\n\ntypedef struct CvPoint\n{\n    int x;\n    int y;\n\n#ifdef __cplusplus\n    CvPoint(int _x = 0, int _y = 0): x(_x), 
y(_y) {}\n    template<typename _Tp>\n    CvPoint(const cv::Point_<_Tp>& pt): x((int)pt.x), y((int)pt.y) {}\n    template<typename _Tp>\n    operator cv::Point_<_Tp>() const { return cv::Point_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y)); }\n#endif\n}\nCvPoint;\n\n/** constructs CvPoint structure. */\nCV_INLINE  CvPoint  cvPoint( int x, int y )\n{\n    CvPoint p;\n\n    p.x = x;\n    p.y = y;\n\n    return p;\n}\n\n\ntypedef struct CvPoint2D32f\n{\n    float x;\n    float y;\n\n#ifdef __cplusplus\n    CvPoint2D32f(float _x = 0, float _y = 0): x(_x), y(_y) {}\n    template<typename _Tp>\n    CvPoint2D32f(const cv::Point_<_Tp>& pt): x((float)pt.x), y((float)pt.y) {}\n    template<typename _Tp>\n    operator cv::Point_<_Tp>() const { return cv::Point_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y)); }\n#endif\n}\nCvPoint2D32f;\n\n/** constructs CvPoint2D32f structure. */\nCV_INLINE  CvPoint2D32f  cvPoint2D32f( double x, double y )\n{\n    CvPoint2D32f p;\n\n    p.x = (float)x;\n    p.y = (float)y;\n\n    return p;\n}\n\n/** converts CvPoint to CvPoint2D32f. */\nCV_INLINE  CvPoint2D32f  cvPointTo32f( CvPoint point )\n{\n    return cvPoint2D32f( (float)point.x, (float)point.y );\n}\n\n/** converts CvPoint2D32f to CvPoint. */\nCV_INLINE  CvPoint  cvPointFrom32f( CvPoint2D32f point )\n{\n    CvPoint ipt;\n    ipt.x = cvRound(point.x);\n    ipt.y = cvRound(point.y);\n\n    return ipt;\n}\n\n\ntypedef struct CvPoint3D32f\n{\n    float x;\n    float y;\n    float z;\n\n#ifdef __cplusplus\n    CvPoint3D32f(float _x = 0, float _y = 0, float _z = 0): x(_x), y(_y), z(_z) {}\n    template<typename _Tp>\n    CvPoint3D32f(const cv::Point3_<_Tp>& pt): x((float)pt.x), y((float)pt.y), z((float)pt.z) {}\n    template<typename _Tp>\n    operator cv::Point3_<_Tp>() const { return cv::Point3_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y), cv::saturate_cast<_Tp>(z)); }\n#endif\n}\nCvPoint3D32f;\n\n/** constructs CvPoint3D32f structure. */\nCV_INLINE  CvPoint3D32f  cvPoint3D32f( double x, double y, double z )\n{\n    CvPoint3D32f p;\n\n    p.x = (float)x;\n    p.y = (float)y;\n    p.z = (float)z;\n\n    return p;\n}\n\n\ntypedef struct CvPoint2D64f\n{\n    double x;\n    double y;\n}\nCvPoint2D64f;\n\n/** constructs CvPoint2D64f structure.*/\nCV_INLINE  CvPoint2D64f  cvPoint2D64f( double x, double y )\n{\n    CvPoint2D64f p;\n\n    p.x = x;\n    p.y = y;\n\n    return p;\n}\n\n\ntypedef struct CvPoint3D64f\n{\n    double x;\n    double y;\n    double z;\n}\nCvPoint3D64f;\n\n/** constructs CvPoint3D64f structure. */\nCV_INLINE  CvPoint3D64f  cvPoint3D64f( double x, double y, double z )\n{\n    CvPoint3D64f p;\n\n    p.x = x;\n    p.y = y;\n    p.z = z;\n\n    return p;\n}\n\n\n/******************************** CvSize's & CvBox **************************************/\n\ntypedef struct CvSize\n{\n    int width;\n    int height;\n\n#ifdef __cplusplus\n    CvSize(int w = 0, int h = 0): width(w), height(h) {}\n    template<typename _Tp>\n    CvSize(const cv::Size_<_Tp>& sz): width(cv::saturate_cast<int>(sz.width)), height(cv::saturate_cast<int>(sz.height)) {}\n    template<typename _Tp>\n    operator cv::Size_<_Tp>() const { return cv::Size_<_Tp>(cv::saturate_cast<_Tp>(width), cv::saturate_cast<_Tp>(height)); }\n#endif\n}\nCvSize;\n\n/** constructs CvSize structure. 
*/\nCV_INLINE  CvSize  cvSize( int width, int height )\n{\n    CvSize s;\n\n    s.width = width;\n    s.height = height;\n\n    return s;\n}\n\ntypedef struct CvSize2D32f\n{\n    float width;\n    float height;\n\n#ifdef __cplusplus\n    CvSize2D32f(float w = 0, float h = 0): width(w), height(h) {}\n    template<typename _Tp>\n    CvSize2D32f(const cv::Size_<_Tp>& sz): width(cv::saturate_cast<float>(sz.width)), height(cv::saturate_cast<float>(sz.height)) {}\n    template<typename _Tp>\n    operator cv::Size_<_Tp>() const { return cv::Size_<_Tp>(cv::saturate_cast<_Tp>(width), cv::saturate_cast<_Tp>(height)); }\n#endif\n}\nCvSize2D32f;\n\n/** constructs CvSize2D32f structure. */\nCV_INLINE  CvSize2D32f  cvSize2D32f( double width, double height )\n{\n    CvSize2D32f s;\n\n    s.width = (float)width;\n    s.height = (float)height;\n\n    return s;\n}\n\n/** @sa RotatedRect\n */\ntypedef struct CvBox2D\n{\n    CvPoint2D32f center;  /**< Center of the box.                          */\n    CvSize2D32f  size;    /**< Box width and length.                       */\n    float angle;          /**< Angle between the horizontal axis           */\n                          /**< and the first side (i.e. length) in degrees */\n\n#ifdef __cplusplus\n    CvBox2D(CvPoint2D32f c = CvPoint2D32f(), CvSize2D32f s = CvSize2D32f(), float a = 0) : center(c), size(s), angle(a) {}\n    CvBox2D(const cv::RotatedRect& rr) : center(rr.center), size(rr.size), angle(rr.angle) {}\n    operator cv::RotatedRect() const { return cv::RotatedRect(center, size, angle); }\n#endif\n}\nCvBox2D;\n\n\n/** Line iterator state: */\ntypedef struct CvLineIterator\n{\n    /** Pointer to the current point: */\n    uchar* ptr;\n\n    /* Bresenham algorithm state: */\n    int  err;\n    int  plus_delta;\n    int  minus_delta;\n    int  plus_step;\n    int  minus_step;\n}\nCvLineIterator;\n\n\n\n/************************************* CvSlice ******************************************/\n#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff\n#define CV_WHOLE_SEQ  cvSlice(0, CV_WHOLE_SEQ_END_INDEX)\n\ntypedef struct CvSlice\n{\n    int  start_index, end_index;\n\n#if defined(__cplusplus) && !defined(__CUDACC__)\n    CvSlice(int start = 0, int end = 0) : start_index(start), end_index(end) {}\n    CvSlice(const cv::Range& r) { *this = (r.start != INT_MIN && r.end != INT_MAX) ? CvSlice(r.start, r.end) : CvSlice(0, CV_WHOLE_SEQ_END_INDEX); }\n    operator cv::Range() const { return (start_index == 0 && end_index == CV_WHOLE_SEQ_END_INDEX ) ? 
cv::Range::all() : cv::Range(start_index, end_index); }\n#endif\n}\nCvSlice;\n\nCV_INLINE  CvSlice  cvSlice( int start, int end )\n{\n    CvSlice slice;\n    slice.start_index = start;\n    slice.end_index = end;\n\n    return slice;\n}\n\n\n\n/************************************* CvScalar *****************************************/\n/** @sa Scalar_\n */\ntypedef struct CvScalar\n{\n    double val[4];\n\n#ifdef __cplusplus\n    CvScalar() {}\n    CvScalar(double d0, double d1 = 0, double d2 = 0, double d3 = 0) { val[0] = d0; val[1] = d1; val[2] = d2; val[3] = d3; }\n    template<typename _Tp>\n    CvScalar(const cv::Scalar_<_Tp>& s) { val[0] = s.val[0]; val[1] = s.val[1]; val[2] = s.val[2]; val[3] = s.val[3]; }\n    template<typename _Tp>\n    operator cv::Scalar_<_Tp>() const { return cv::Scalar_<_Tp>(cv::saturate_cast<_Tp>(val[0]), cv::saturate_cast<_Tp>(val[1]), cv::saturate_cast<_Tp>(val[2]), cv::saturate_cast<_Tp>(val[3])); }\n    template<typename _Tp, int cn>\n    CvScalar(const cv::Vec<_Tp, cn>& v)\n    {\n        int i;\n        for( i = 0; i < (cn < 4 ? cn : 4); i++ ) val[i] = v.val[i];\n        for( ; i < 4; i++ ) val[i] = 0;\n    }\n#endif\n}\nCvScalar;\n\nCV_INLINE  CvScalar  cvScalar( double val0, double val1 CV_DEFAULT(0),\n                               double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0))\n{\n    CvScalar scalar;\n    scalar.val[0] = val0; scalar.val[1] = val1;\n    scalar.val[2] = val2; scalar.val[3] = val3;\n    return scalar;\n}\n\n\nCV_INLINE  CvScalar  cvRealScalar( double val0 )\n{\n    CvScalar scalar;\n    scalar.val[0] = val0;\n    scalar.val[1] = scalar.val[2] = scalar.val[3] = 0;\n    return scalar;\n}\n\nCV_INLINE  CvScalar  cvScalarAll( double val0123 )\n{\n    CvScalar scalar;\n    scalar.val[0] = val0123;\n    scalar.val[1] = val0123;\n    scalar.val[2] = val0123;\n    scalar.val[3] = val0123;\n    return scalar;\n}\n\n/****************************************************************************************\\\n*                                   Dynamic Data structures                              *\n\\****************************************************************************************/\n\n/******************************** Memory storage ****************************************/\n\ntypedef struct CvMemBlock\n{\n    struct CvMemBlock*  prev;\n    struct CvMemBlock*  next;\n}\nCvMemBlock;\n\n#define CV_STORAGE_MAGIC_VAL    0x42890000\n\ntypedef struct CvMemStorage\n{\n    int signature;\n    CvMemBlock* bottom;           /**< First allocated block.                   */\n    CvMemBlock* top;              /**< Current memory block - top of the stack. */\n    struct  CvMemStorage* parent; /**< We get new blocks from parent as needed. */\n    int block_size;               /**< Block size.                              */\n    int free_space;               /**< Remaining free space in current block.   */\n}\nCvMemStorage;\n\n#define CV_IS_STORAGE(storage)  \\\n    ((storage) != NULL &&       \\\n    (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL)\n\n\ntypedef struct CvMemStoragePos\n{\n    CvMemBlock* top;\n    int free_space;\n}\nCvMemStoragePos;\n\n\n/*********************************** Sequence *******************************************/\n\ntypedef struct CvSeqBlock\n{\n    struct CvSeqBlock*  prev; /**< Previous sequence block.                   */\n    struct CvSeqBlock*  next; /**< Next sequence block.                       
*/\n    int    start_index;       /**< Index of the first element in the block +  */\n                              /**< sequence->first->start_index.              */\n    int    count;             /**< Number of elements in the block.           */\n    schar* data;              /**< Pointer to the first element of the block. */\n}\nCvSeqBlock;\n\n\n#define CV_TREE_NODE_FIELDS(node_type)                               \\\n    int       flags;             /**< Miscellaneous flags.     */      \\\n    int       header_size;       /**< Size of sequence header. */      \\\n    struct    node_type* h_prev; /**< Previous sequence.       */      \\\n    struct    node_type* h_next; /**< Next sequence.           */      \\\n    struct    node_type* v_prev; /**< 2nd previous sequence.   */      \\\n    struct    node_type* v_next  /**< 2nd next sequence.       */\n\n/**\n   Read/Write sequence.\n   Elements can be dynamically inserted to or deleted from the sequence.\n*/\n#define CV_SEQUENCE_FIELDS()                                              \\\n    CV_TREE_NODE_FIELDS(CvSeq);                                           \\\n    int       total;          /**< Total number of elements.            */  \\\n    int       elem_size;      /**< Size of sequence element in bytes.   */  \\\n    schar*    block_max;      /**< Maximal bound of the last block.     */  \\\n    schar*    ptr;            /**< Current write pointer.               */  \\\n    int       delta_elems;    /**< Grow seq this many at a time.        */  \\\n    CvMemStorage* storage;    /**< Where the seq is stored.             */  \\\n    CvSeqBlock* free_blocks;  /**< Free blocks list.                    */  \\\n    CvSeqBlock* first;        /**< Pointer to the first sequence block. */\n\ntypedef struct CvSeq\n{\n    CV_SEQUENCE_FIELDS()\n}\nCvSeq;\n\n#define CV_TYPE_NAME_SEQ             \"opencv-sequence\"\n#define CV_TYPE_NAME_SEQ_TREE        \"opencv-sequence-tree\"\n\n/*************************************** Set ********************************************/\n/** @brief Set\n  Order is not preserved. There can be gaps between sequence elements.\n  After the element has been inserted it stays in the same place all the time.\n  The MSB (most-significant or sign bit) of the first field (flags) is 0 iff the element exists.\n*/\n#define CV_SET_ELEM_FIELDS(elem_type)   \\\n    int  flags;                         \\\n    struct elem_type* next_free;\n\ntypedef struct CvSetElem\n{\n    CV_SET_ELEM_FIELDS(CvSetElem)\n}\nCvSetElem;\n\n#define CV_SET_FIELDS()      \\\n    CV_SEQUENCE_FIELDS()     \\\n    CvSetElem* free_elems;   \\\n    int active_count;\n\ntypedef struct CvSet\n{\n    CV_SET_FIELDS()\n}\nCvSet;\n\n\n#define CV_SET_ELEM_IDX_MASK   ((1 << 26) - 1)\n#define CV_SET_ELEM_FREE_FLAG  (1 << (sizeof(int)*8-1))\n\n/** Checks whether the element pointed to by ptr belongs to a set or not */\n#define CV_IS_SET_ELEM( ptr )  (((CvSetElem*)(ptr))->flags >= 0)\n\n/************************************* Graph ********************************************/\n\n/** @name Graph\n\nWe represent a graph as a set of vertices. Vertices contain their adjacency lists (more exactly,\npointers to the first incoming or outgoing edge (or 0 if isolated vertex)). Edges are stored in\nanother set. There is a singly-linked list of incoming/outgoing edges for each vertex.\n\nEach edge consists of:\n\n- Two pointers to the starting and ending vertices (vtx[0] and vtx[1] respectively).\n\n    A graph may be oriented or not. 
In the latter case, edges between vertex i and vertex j are not\ndistinguished during search operations.\n\n- Two pointers to next edges for the starting and ending vertices, where next[0] points to the\nnext edge in the vtx[0] adjacency list and next[1] points to the next edge in the vtx[1]\nadjacency list.\n\n@see CvGraphEdge, CvGraphVtx, CvGraphVtx2D, CvGraph\n@{\n*/\n#define CV_GRAPH_EDGE_FIELDS()      \\\n    int flags;                      \\\n    float weight;                   \\\n    struct CvGraphEdge* next[2];    \\\n    struct CvGraphVtx* vtx[2];\n\n\n#define CV_GRAPH_VERTEX_FIELDS()    \\\n    int flags;                      \\\n    struct CvGraphEdge* first;\n\n\ntypedef struct CvGraphEdge\n{\n    CV_GRAPH_EDGE_FIELDS()\n}\nCvGraphEdge;\n\ntypedef struct CvGraphVtx\n{\n    CV_GRAPH_VERTEX_FIELDS()\n}\nCvGraphVtx;\n\ntypedef struct CvGraphVtx2D\n{\n    CV_GRAPH_VERTEX_FIELDS()\n    CvPoint2D32f* ptr;\n}\nCvGraphVtx2D;\n\n/**\n   Graph is \"derived\" from the set (this is a set of vertices)\n   and includes another set (edges)\n*/\n#define  CV_GRAPH_FIELDS()   \\\n    CV_SET_FIELDS()          \\\n    CvSet* edges;\n\ntypedef struct CvGraph\n{\n    CV_GRAPH_FIELDS()\n}\nCvGraph;\n\n#define CV_TYPE_NAME_GRAPH \"opencv-graph\"\n\n/** @} */\n\n/*********************************** Chain/Contour **************************************/\n\ntypedef struct CvChain\n{\n    CV_SEQUENCE_FIELDS()\n    CvPoint  origin;\n}\nCvChain;\n\n#define CV_CONTOUR_FIELDS()  \\\n    CV_SEQUENCE_FIELDS()     \\\n    CvRect rect;             \\\n    int color;               \\\n    int reserved[3];\n\ntypedef struct CvContour\n{\n    CV_CONTOUR_FIELDS()\n}\nCvContour;\n\ntypedef CvContour CvPoint2DSeq;\n\n/****************************************************************************************\\\n*                                    Sequence types                                      *\n\\****************************************************************************************/\n\n#define CV_SEQ_MAGIC_VAL             0x42990000\n\n#define CV_IS_SEQ(seq) \\\n    ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL)\n\n#define CV_SET_MAGIC_VAL             0x42980000\n#define CV_IS_SET(set) \\\n    ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL)\n\n#define CV_SEQ_ELTYPE_BITS           12\n#define CV_SEQ_ELTYPE_MASK           ((1 << CV_SEQ_ELTYPE_BITS) - 1)\n\n#define CV_SEQ_ELTYPE_POINT          CV_32SC2  /**< (x,y) */\n#define CV_SEQ_ELTYPE_CODE           CV_8UC1   /**< Freeman code: 0..7 */\n#define CV_SEQ_ELTYPE_GENERIC        0\n#define CV_SEQ_ELTYPE_PTR            CV_USRTYPE1\n#define CV_SEQ_ELTYPE_PPOINT         CV_SEQ_ELTYPE_PTR  /**< &(x,y) */\n#define CV_SEQ_ELTYPE_INDEX          CV_32SC1  /**< #(x,y) */\n#define CV_SEQ_ELTYPE_GRAPH_EDGE     0  /**< &next_o, &next_d, &vtx_o, &vtx_d */\n#define CV_SEQ_ELTYPE_GRAPH_VERTEX   0  /**< first_edge, &(x,y) */\n#define CV_SEQ_ELTYPE_TRIAN_ATR      0  /**< vertex of the binary tree   */\n#define CV_SEQ_ELTYPE_CONNECTED_COMP 0  /**< connected component  */\n#define CV_SEQ_ELTYPE_POINT3D        CV_32FC3  /**< (x,y,z)  */\n\n#define CV_SEQ_KIND_BITS        2\n#define CV_SEQ_KIND_MASK        (((1 << CV_SEQ_KIND_BITS) - 1)<<CV_SEQ_ELTYPE_BITS)\n\n/** types of sequences */\n#define CV_SEQ_KIND_GENERIC     (0 << CV_SEQ_ELTYPE_BITS)\n#define CV_SEQ_KIND_CURVE       (1 << CV_SEQ_ELTYPE_BITS)\n#define CV_SEQ_KIND_BIN_TREE    (2 << CV_SEQ_ELTYPE_BITS)\n\n/** types of sparse sequences (sets) */\n#define 
CV_SEQ_KIND_GRAPH       (1 << CV_SEQ_ELTYPE_BITS)\n#define CV_SEQ_KIND_SUBDIV2D    (2 << CV_SEQ_ELTYPE_BITS)\n\n#define CV_SEQ_FLAG_SHIFT       (CV_SEQ_KIND_BITS + CV_SEQ_ELTYPE_BITS)\n\n/** flags for curves */\n#define CV_SEQ_FLAG_CLOSED     (1 << CV_SEQ_FLAG_SHIFT)\n#define CV_SEQ_FLAG_SIMPLE     (0 << CV_SEQ_FLAG_SHIFT)\n#define CV_SEQ_FLAG_CONVEX     (0 << CV_SEQ_FLAG_SHIFT)\n#define CV_SEQ_FLAG_HOLE       (2 << CV_SEQ_FLAG_SHIFT)\n\n/** flags for graphs */\n#define CV_GRAPH_FLAG_ORIENTED (1 << CV_SEQ_FLAG_SHIFT)\n\n#define CV_GRAPH               CV_SEQ_KIND_GRAPH\n#define CV_ORIENTED_GRAPH      (CV_SEQ_KIND_GRAPH|CV_GRAPH_FLAG_ORIENTED)\n\n/** point sets */\n#define CV_SEQ_POINT_SET       (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT)\n#define CV_SEQ_POINT3D_SET     (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT3D)\n#define CV_SEQ_POLYLINE        (CV_SEQ_KIND_CURVE  | CV_SEQ_ELTYPE_POINT)\n#define CV_SEQ_POLYGON         (CV_SEQ_FLAG_CLOSED | CV_SEQ_POLYLINE )\n#define CV_SEQ_CONTOUR         CV_SEQ_POLYGON\n#define CV_SEQ_SIMPLE_POLYGON  (CV_SEQ_FLAG_SIMPLE | CV_SEQ_POLYGON  )\n\n/** chain-coded curves */\n#define CV_SEQ_CHAIN           (CV_SEQ_KIND_CURVE  | CV_SEQ_ELTYPE_CODE)\n#define CV_SEQ_CHAIN_CONTOUR   (CV_SEQ_FLAG_CLOSED | CV_SEQ_CHAIN)\n\n/** binary tree for the contour */\n#define CV_SEQ_POLYGON_TREE    (CV_SEQ_KIND_BIN_TREE  | CV_SEQ_ELTYPE_TRIAN_ATR)\n\n/** sequence of the connected components */\n#define CV_SEQ_CONNECTED_COMP  (CV_SEQ_KIND_GENERIC  | CV_SEQ_ELTYPE_CONNECTED_COMP)\n\n/** sequence of the integer numbers */\n#define CV_SEQ_INDEX           (CV_SEQ_KIND_GENERIC  | CV_SEQ_ELTYPE_INDEX)\n\n#define CV_SEQ_ELTYPE( seq )   ((seq)->flags & CV_SEQ_ELTYPE_MASK)\n#define CV_SEQ_KIND( seq )     ((seq)->flags & CV_SEQ_KIND_MASK )\n\n/** flag checking */\n#define CV_IS_SEQ_INDEX( seq )      ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \\\n                                     (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC))\n\n#define CV_IS_SEQ_CURVE( seq )      (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE)\n#define CV_IS_SEQ_CLOSED( seq )     (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0)\n#define CV_IS_SEQ_CONVEX( seq )     0\n#define CV_IS_SEQ_HOLE( seq )       (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0)\n#define CV_IS_SEQ_SIMPLE( seq )     1\n\n/** type checking macros */\n#define CV_IS_SEQ_POINT_SET( seq ) \\\n    ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2))\n\n#define CV_IS_SEQ_POINT_SUBSET( seq ) \\\n    (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT)\n\n#define CV_IS_SEQ_POLYLINE( seq )   \\\n    (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq))\n\n#define CV_IS_SEQ_POLYGON( seq )   \\\n    (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq))\n\n#define CV_IS_SEQ_CHAIN( seq )   \\\n    (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1)\n\n#define CV_IS_SEQ_CONTOUR( seq )   \\\n    (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq)))\n\n#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \\\n    (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq ))\n\n#define CV_IS_SEQ_POLYGON_TREE( seq ) \\\n    (CV_SEQ_ELTYPE (seq) ==  CV_SEQ_ELTYPE_TRIAN_ATR &&    \\\n    CV_SEQ_KIND( seq ) ==  CV_SEQ_KIND_BIN_TREE )\n\n#define CV_IS_GRAPH( seq )    \\\n    (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH)\n\n#define CV_IS_GRAPH_ORIENTED( seq )   \\\n    (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0)\n\n#define CV_IS_SUBDIV2D( seq )  \\\n    (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == 
CV_SEQ_KIND_SUBDIV2D)\n\n/****************************************************************************************/\n/*                            Sequence writer & reader                                  */\n/****************************************************************************************/\n\n#define CV_SEQ_WRITER_FIELDS()                                     \\\n    int          header_size;                                      \\\n    CvSeq*       seq;        /**< the sequence written */            \\\n    CvSeqBlock*  block;      /**< current block */                   \\\n    schar*       ptr;        /**< pointer to free space */           \\\n    schar*       block_min;  /**< pointer to the beginning of block*/\\\n    schar*       block_max;  /**< pointer to the end of block */\n\ntypedef struct CvSeqWriter\n{\n    CV_SEQ_WRITER_FIELDS()\n}\nCvSeqWriter;\n\n\n#define CV_SEQ_READER_FIELDS()                                      \\\n    int          header_size;                                       \\\n    CvSeq*       seq;        /**< sequence being read */              \\\n    CvSeqBlock*  block;      /**< current block */                    \\\n    schar*       ptr;        /**< pointer to next element to read */  \\\n    schar*       block_min;  /**< pointer to the beginning of block */\\\n    schar*       block_max;  /**< pointer to the end of block */      \\\n    int          delta_index;/**< = seq->first->start_index   */      \\\n    schar*       prev_elem;  /**< pointer to previous element */\n\ntypedef struct CvSeqReader\n{\n    CV_SEQ_READER_FIELDS()\n}\nCvSeqReader;\n\n/****************************************************************************************/\n/*                                Operations on sequences                               */\n/****************************************************************************************/\n\n#define  CV_SEQ_ELEM( seq, elem_type, index )                    \\\n/** assert gives some guarantee that <seq> parameter is valid */  \\\n(   assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) &&      \\\n    (seq)->elem_size == sizeof(elem_type)),                      \\\n    (elem_type*)((seq)->first && (unsigned)index <               \\\n    (unsigned)((seq)->first->count) ?                            
\\\n    (seq)->first->data + (index) * sizeof(elem_type) :           \\\n    cvGetSeqElem( (CvSeq*)(seq), (index) )))\n#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) )\n\n/** Add element to sequence: */\n#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer )     \\\n{                                                     \\\n    if( (writer).ptr >= (writer).block_max )          \\\n    {                                                 \\\n        cvCreateSeqBlock( &writer);                   \\\n    }                                                 \\\n    memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\\\n    (writer).ptr += (writer).seq->elem_size;          \\\n}\n\n#define CV_WRITE_SEQ_ELEM( elem, writer )             \\\n{                                                     \\\n    assert( (writer).seq->elem_size == sizeof(elem)); \\\n    if( (writer).ptr >= (writer).block_max )          \\\n    {                                                 \\\n        cvCreateSeqBlock( &writer);                   \\\n    }                                                 \\\n    assert( (writer).ptr <= (writer).block_max - sizeof(elem));\\\n    memcpy((writer).ptr, &(elem), sizeof(elem));      \\\n    (writer).ptr += sizeof(elem);                     \\\n}\n\n\n/** Move reader position forward: */\n#define CV_NEXT_SEQ_ELEM( elem_size, reader )                 \\\n{                                                             \\\n    if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \\\n    {                                                         \\\n        cvChangeSeqBlock( &(reader), 1 );                     \\\n    }                                                         \\\n}\n\n\n/** Move reader position backward: */\n#define CV_PREV_SEQ_ELEM( elem_size, reader )                \\\n{                                                            \\\n    if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \\\n    {                                                        \\\n        cvChangeSeqBlock( &(reader), -1 );                   \\\n    }                                                        \\\n}\n\n/** Read element and move read position forward: */\n#define CV_READ_SEQ_ELEM( elem, reader )                       \\\n{                                                              \\\n    assert( (reader).seq->elem_size == sizeof(elem));          \\\n    memcpy( &(elem), (reader).ptr, sizeof((elem)));            \\\n    CV_NEXT_SEQ_ELEM( sizeof(elem), reader )                   \\\n}\n\n/** Read element and move read position backward: */\n#define CV_REV_READ_SEQ_ELEM( elem, reader )                     \\\n{                                                                \\\n    assert( (reader).seq->elem_size == sizeof(elem));            \\\n    memcpy(&(elem), (reader).ptr, sizeof((elem)));               \\\n    CV_PREV_SEQ_ELEM( sizeof(elem), reader )                     \\\n}\n\n\n#define CV_READ_CHAIN_POINT( _pt, reader )                              \\\n{                                                                       \\\n    (_pt) = (reader).pt;                                                \\\n    if( (reader).ptr )                                                  \\\n    {                                                                   \\\n        CV_READ_SEQ_ELEM( (reader).code, (reader));                     \\\n        assert( ((reader).code & ~7) == 0 );                            \\\n        (reader).pt.x += 
(reader).deltas[(int)(reader).code][0];        \\\n        (reader).pt.y += (reader).deltas[(int)(reader).code][1];        \\\n    }                                                                   \\\n}\n\n#define CV_CURRENT_POINT( reader )  (*((CvPoint*)((reader).ptr)))\n#define CV_PREV_POINT( reader )     (*((CvPoint*)((reader).prev_elem)))\n\n#define CV_READ_EDGE( pt1, pt2, reader )               \\\n{                                                      \\\n    assert( sizeof(pt1) == sizeof(CvPoint) &&          \\\n            sizeof(pt2) == sizeof(CvPoint) &&          \\\n            reader.seq->elem_size == sizeof(CvPoint)); \\\n    (pt1) = CV_PREV_POINT( reader );                   \\\n    (pt2) = CV_CURRENT_POINT( reader );                \\\n    (reader).prev_elem = (reader).ptr;                 \\\n    CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader));      \\\n}\n\n/************ Graph macros ************/\n\n/** Return next graph edge for given vertex: */\n#define  CV_NEXT_GRAPH_EDGE( edge, vertex )                              \\\n     (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)),  \\\n      (edge)->next[(edge)->vtx[1] == (vertex)])\n\n\n\n/****************************************************************************************\\\n*             Data structures for persistence (a.k.a serialization) functionality        *\n\\****************************************************************************************/\n\n/** \"black box\" file storage */\ntypedef struct CvFileStorage CvFileStorage;\n\n/** Storage flags: */\n#define CV_STORAGE_READ          0\n#define CV_STORAGE_WRITE         1\n#define CV_STORAGE_WRITE_TEXT    CV_STORAGE_WRITE\n#define CV_STORAGE_WRITE_BINARY  CV_STORAGE_WRITE\n#define CV_STORAGE_APPEND        2\n#define CV_STORAGE_MEMORY        4\n#define CV_STORAGE_FORMAT_MASK   (7<<3)\n#define CV_STORAGE_FORMAT_AUTO   0\n#define CV_STORAGE_FORMAT_XML    8\n#define CV_STORAGE_FORMAT_YAML  16\n\n/** @brief List of attributes. :\n\nIn the current implementation, attributes are used to pass extra parameters when writing user\nobjects (see cvWrite). XML attributes inside tags are not supported, aside from the object type\nspecification (type_id attribute).\n@see cvAttrList, cvAttrValue\n */\ntypedef struct CvAttrList\n{\n    const char** attr;         /**< NULL-terminated array of (attribute_name,attribute_value) pairs. */\n    struct CvAttrList* next;   /**< Pointer to next chunk of the attributes list.                    */\n}\nCvAttrList;\n\n/** initializes CvAttrList structure */\nCV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL),\n                                 CvAttrList* next CV_DEFAULT(NULL) )\n{\n    CvAttrList l;\n    l.attr = attr;\n    l.next = next;\n\n    return l;\n}\n\nstruct CvTypeInfo;\n\n#define CV_NODE_NONE        0\n#define CV_NODE_INT         1\n#define CV_NODE_INTEGER     CV_NODE_INT\n#define CV_NODE_REAL        2\n#define CV_NODE_FLOAT       CV_NODE_REAL\n#define CV_NODE_STR         3\n#define CV_NODE_STRING      CV_NODE_STR\n#define CV_NODE_REF         4 /**< not used */\n#define CV_NODE_SEQ         5\n#define CV_NODE_MAP         6\n#define CV_NODE_TYPE_MASK   7\n\n#define CV_NODE_TYPE(flags)  ((flags) & CV_NODE_TYPE_MASK)\n\n/** file node flags */\n#define CV_NODE_FLOW        8 /**<Used only for writing structures in YAML format. 
*/\n#define CV_NODE_USER        16\n#define CV_NODE_EMPTY       32\n#define CV_NODE_NAMED       64\n\n#define CV_NODE_IS_INT(flags)        (CV_NODE_TYPE(flags) == CV_NODE_INT)\n#define CV_NODE_IS_REAL(flags)       (CV_NODE_TYPE(flags) == CV_NODE_REAL)\n#define CV_NODE_IS_STRING(flags)     (CV_NODE_TYPE(flags) == CV_NODE_STRING)\n#define CV_NODE_IS_SEQ(flags)        (CV_NODE_TYPE(flags) == CV_NODE_SEQ)\n#define CV_NODE_IS_MAP(flags)        (CV_NODE_TYPE(flags) == CV_NODE_MAP)\n#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ)\n#define CV_NODE_IS_FLOW(flags)       (((flags) & CV_NODE_FLOW) != 0)\n#define CV_NODE_IS_EMPTY(flags)      (((flags) & CV_NODE_EMPTY) != 0)\n#define CV_NODE_IS_USER(flags)       (((flags) & CV_NODE_USER) != 0)\n#define CV_NODE_HAS_NAME(flags)      (((flags) & CV_NODE_NAMED) != 0)\n\n#define CV_NODE_SEQ_SIMPLE 256\n#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0)\n\ntypedef struct CvString\n{\n    int len;\n    char* ptr;\n}\nCvString;\n\n/** All the keys (names) of elements in the file storage being read\n   are stored in the hash to speed up the lookup operations: */\ntypedef struct CvStringHashNode\n{\n    unsigned hashval;\n    CvString str;\n    struct CvStringHashNode* next;\n}\nCvStringHashNode;\n\ntypedef struct CvGenericHash CvFileNodeHash;\n\n/** Basic element of the file storage - scalar or collection: */\ntypedef struct CvFileNode\n{\n    int tag;\n    struct CvTypeInfo* info; /**< type information\n            (only for user-defined objects, for others it is 0) */\n    union\n    {\n        double f; /**< scalar floating-point number */\n        int i;    /**< scalar integer number */\n        CvString str; /**< text string */\n        CvSeq* seq; /**< sequence (ordered collection of file nodes) */\n        CvFileNodeHash* map; /**< map (collection of named file nodes) */\n    } data;\n}\nCvFileNode;\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\ntypedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr );\ntypedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr );\ntypedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node );\ntypedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name,\n                                      const void* struct_ptr, CvAttrList attributes );\ntypedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr );\n#ifdef __cplusplus\n}\n#endif\n\n/** @brief Type information\n\nThe structure contains information about one of the standard or user-defined types. Instances of the\ntype may or may not contain a pointer to the corresponding CvTypeInfo structure. In any case, there\nis a way to find the type info structure for a given object using the cvTypeOf function.\nAlternatively, type info can be found by type name using cvFindType, which is used when an object\nis read from file storage. The user can register a new type with cvRegisterType that adds the type\ninformation structure into the beginning of the type list. 
Thus, it is possible to create\nspecialized types from generic standard types and override the basic methods.\n */\ntypedef struct CvTypeInfo\n{\n    int flags; /**< not used */\n    int header_size; /**< sizeof(CvTypeInfo) */\n    struct CvTypeInfo* prev; /**< previous registered type in the list */\n    struct CvTypeInfo* next; /**< next registered type in the list */\n    const char* type_name; /**< type name, written to file storage */\n    CvIsInstanceFunc is_instance; /**< checks if the passed object belongs to the type */\n    CvReleaseFunc release; /**< releases object (memory etc.) */\n    CvReadFunc read; /**< reads object from file storage */\n    CvWriteFunc write; /**< writes object to file storage */\n    CvCloneFunc clone; /**< creates a copy of the object */\n}\nCvTypeInfo;\n\n\n/**** System data types ******/\n\ntypedef struct CvPluginFuncInfo\n{\n    void** func_addr;\n    void* default_func_addr;\n    const char* func_names;\n    int search_modules;\n    int loaded_from;\n}\nCvPluginFuncInfo;\n\ntypedef struct CvModuleInfo\n{\n    struct CvModuleInfo* next;\n    const char* name;\n    const char* version;\n    CvPluginFuncInfo* func_tab;\n}\nCvModuleInfo;\n\n/** @} */\n\n#endif /*__OPENCV_CORE_TYPES_H__*/\n\n/* End of file. */\n"
  },
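  {
    "path": "src/samples/legacy_core_usage.cpp",
    "content": "/* Hypothetical usage sketch -- not part of upstream OpenCV; the file name and\n   placement are illustrative assumptions. It exercises APIs documented in the\n   vendored headers: cvmGet/cvmSet and the aggregate constructors from\n   opencv2/core/types_c.h, plus AutoBuffer and tick counting from\n   opencv2/core/utility.hpp. Assumes opencv2/core/core_c.h is available for\n   cvCreateMat/cvReleaseMat. */\n#include <cstdio>\n#include \"opencv2/core/core_c.h\"\n#include \"opencv2/core/utility.hpp\"\n\nint main()\n{\n    /* cvmGet/cvmSet accept only single-channel CV_32FC1/CV_64FC1 matrices. */\n    CvMat* m = cvCreateMat(3, 3, CV_64FC1);\n    int64 t0 = cv::getTickCount();\n    for (int r = 0; r < m->rows; ++r)\n        for (int c = 0; c < m->cols; ++c)\n            cvmSet(m, r, c, 10.0 * r + c);   /* fast inline element write */\n    double sec = double(cv::getTickCount() - t0) / cv::getTickFrequency();\n    std::printf(\"m(2,1) = %f, filled in %g s\\n\", cvmGet(m, 2, 1), sec);\n\n    /* Plain aggregate helpers: constructors, not allocations. */\n    CvRect roi = cvRect(10, 20, 64, 48);\n    CvTermCriteria tc = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 1e-6);\n    std::printf(\"roi %dx%d at (%d,%d), up to %d iterations\\n\",\n                roi.width, roi.height, roi.x, roi.y, tc.max_iter);\n\n    /* AutoBuffer: stack storage while the request fits the fixed-size part,\n       heap allocation (freed in the destructor) otherwise. */\n    cv::AutoBuffer<double> buf;\n    buf.allocate(m->rows);\n    double* p = buf;                         /* implicit conversion to _Tp* */\n    for (int r = 0; r < m->rows; ++r)\n        p[r] = cvmGet(m, r, 0);\n    std::printf(\"first column starts at %f\\n\", p[0]);\n\n    cvReleaseMat(&m);\n    return 0;\n}\n"
  },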
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/utility.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_UTILITY_H__\n#define __OPENCV_CORE_UTILITY_H__\n\n#ifndef __cplusplus\n#  error utility.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n\n#ifdef CV_COLLECT_IMPL_DATA\nCV_EXPORTS void setImpl(int flags); // set implementation flags and reset storage arrays\nCV_EXPORTS void addImpl(int flag, const char* func = 0); // add implementation and function name to storage arrays\n// Get stored implementation flags and fucntions names arrays\n// Each implementation entry correspond to function name entry, so you can find which implementation was executed in which fucntion\nCV_EXPORTS int getImpl(std::vector<int> &impl, std::vector<String> &funName);\n\nCV_EXPORTS bool useCollection(); // return implementation collection state\nCV_EXPORTS void setUseCollection(bool flag); // set implementation collection state\n\n#define CV_IMPL_PLAIN  0x01 // native CPU OpenCV implementation\n#define CV_IMPL_OCL    0x02 // OpenCL implementation\n#define CV_IMPL_IPP    0x04 // IPP implementation\n#define CV_IMPL_MT     0x10 // multithreaded implementation\n\n#define CV_IMPL_ADD(impl)                                                   \\\n    
if(cv::useCollection())                                                 \\\n    {                                                                       \\\n        cv::addImpl(impl, CV_Func);                                         \\\n    }\n#else\n#define CV_IMPL_ADD(impl)\n#endif\n\n//! @addtogroup core_utils\n//! @{\n\n/** @brief  Automatically Allocated Buffer Class\n\n The class is used for temporary buffers in functions and methods.\n If a temporary buffer is usually small (a few K's of memory),\n but its size depends on the parameters, it makes sense to create a small\n fixed-size array on the stack and use it if it's large enough. If the required buffer size\n is larger than the fixed size, another buffer of sufficient size is allocated dynamically\n and released after the processing. Therefore, in typical cases, when the buffer size is small,\n there is no overhead associated with malloc()/free().\n At the same time, there is no limit on the size of processed data.\n\n This is what AutoBuffer does. The template takes 2 parameters - the type of the buffer elements and\n the number of stack-allocated elements. Here is how the class is used:\n\n \\code\n void my_func(const cv::Mat& m)\n {\n    cv::AutoBuffer<float> buf; // create an automatic buffer; its fixed-size part holds 1024/sizeof(float)+8 elements\n\n    buf.allocate(m.rows); // if m.rows fits into the fixed-size part, the pre-allocated buffer is used,\n                          // otherwise a buffer of \"m.rows\" floats will be allocated\n                          // dynamically and deallocated in cv::AutoBuffer destructor\n    ...\n }\n \\endcode\n*/\ntemplate<typename _Tp, size_t fixed_size = 1024/sizeof(_Tp)+8> class AutoBuffer\n{\npublic:\n    typedef _Tp value_type;\n\n    //! the default constructor\n    AutoBuffer();\n    //! constructor taking the real buffer size\n    AutoBuffer(size_t _size);\n\n    //! the copy constructor\n    AutoBuffer(const AutoBuffer<_Tp, fixed_size>& buf);\n    //! the assignment operator\n    AutoBuffer<_Tp, fixed_size>& operator = (const AutoBuffer<_Tp, fixed_size>& buf);\n\n    //! destructor. calls deallocate()\n    ~AutoBuffer();\n\n    //! allocates a new buffer of size _size; if _size is small enough, the stack-allocated buffer is used\n    void allocate(size_t _size);\n    //! deallocates the buffer if it was dynamically allocated\n    void deallocate();\n    //! resizes the buffer and preserves the content\n    void resize(size_t _size);\n    //! returns the current buffer size\n    size_t size() const;\n    //! returns pointer to the real buffer, stack-allocated or heap-allocated\n    operator _Tp* ();\n    //! returns read-only pointer to the real buffer, stack-allocated or heap-allocated\n    operator const _Tp* () const;\n\nprotected:\n    //! pointer to the real buffer, can point to buf if the buffer is small enough\n    _Tp* ptr;\n    //! size of the real buffer\n    size_t sz;\n    //! pre-allocated buffer. At least 1 element to confirm C++ standard requirements\n    _Tp buf[(fixed_size > 0) ? 
fixed_size : 1];\n};\n\n/**  @brief Sets/resets the break-on-error mode.\n\nWhen the break-on-error mode is set, the default error handler issues a hardware exception, which\ncan make debugging more convenient.\n\n\\return the previous state\n */\nCV_EXPORTS bool setBreakOnError(bool flag);\n\nextern \"C\" typedef int (*ErrorCallback)( int status, const char* func_name,\n                                       const char* err_msg, const char* file_name,\n                                       int line, void* userdata );\n\n\n/** @brief Sets the new error handler and the optional user data.\n\n  The function sets the new error handler, called from cv::error().\n\n  \\param errCallback the new error handler. If NULL, the default error handler is used.\n  \\param userdata the optional user data pointer, passed to the callback.\n  \\param prevUserdata the optional output parameter where the previous user data pointer is stored\n\n  \\return the previous error handler\n*/\nCV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, void* userdata=0, void** prevUserdata=0);\n\n/** @brief Returns a text string formatted using the printf-like expression.\n\nThe function acts like sprintf but forms and returns an STL string. It can be used to form an error\nmessage in the Exception constructor.\n@param fmt printf-compatible formatting specifiers.\n */\nCV_EXPORTS String format( const char* fmt, ... );\nCV_EXPORTS String tempfile( const char* suffix = 0);\nCV_EXPORTS void glob(String pattern, std::vector<String>& result, bool recursive = false);\n\n/** @brief OpenCV will try to set the number of threads for the next parallel region.\n\nIf threads == 0, OpenCV will disable threading optimizations and run all its functions\nsequentially. Passing threads \\< 0 will reset the number of threads to the system default. This function must\nbe called outside of a parallel region.\n\nOpenCV will try to run its functions with the specified number of threads, but the actual behaviour depends on the\nframework:\n-   `TBB` – User-defined parallel constructions will run with the same number of threads, unless\n    another number is specified. If the user later creates their own scheduler, OpenCV will use it.\n-   `OpenMP` – No specially defined behaviour.\n-   `Concurrency` – If threads == 1, OpenCV will disable threading optimizations and run its\n    functions sequentially.\n-   `GCD` – Supports only values \\<= 0.\n-   `C=` – No specially defined behaviour.\n@param nthreads Number of threads used by OpenCV.\n@sa getNumThreads, getThreadNum\n */\nCV_EXPORTS_W void setNumThreads(int nthreads);\n\n/** @brief Returns the number of threads used by OpenCV for parallel regions.\n\nAlways returns 1 if OpenCV is built without threading support.\n\nThe exact meaning of the return value depends on the threading framework used by the OpenCV library:\n- `TBB` – The number of threads that OpenCV will try to use for parallel regions. 
If there is\n  any tbb::thread_scheduler_init in user code conflicting with OpenCV, then the function returns\n  the default number of threads used by the TBB library.\n- `OpenMP` – An upper bound on the number of threads that could be used to form a new team.\n- `Concurrency` – The number of threads that OpenCV will try to use for parallel regions.\n- `GCD` – Unsupported; returns the GCD thread pool limit (512) for compatibility.\n- `C=` – The number of threads that OpenCV will try to use for parallel regions, if setNumThreads\n  was called before with threads \\> 0; otherwise returns the number of logical CPUs\n  available to the process.\n@sa setNumThreads, getThreadNum\n */\nCV_EXPORTS_W int getNumThreads();\n\n/** @brief Returns the index of the currently executed thread within the current parallel region. Always\nreturns 0 if called outside of a parallel region.\n\nThe exact meaning of the return value depends on the threading framework used by the OpenCV library:\n- `TBB` – Unsupported with the current 4.1 TBB release. It may be supported in a future release.\n- `OpenMP` – The thread number, within the current team, of the calling thread.\n- `Concurrency` – An ID for the virtual processor that the current context is executing on (0\n  for the master thread and a unique number for others, but not necessarily 1,2,3,...).\n- `GCD` – The system ID of the calling thread. Never returns 0 inside a parallel region.\n- `C=` – The index of the current parallel task.\n@sa setNumThreads, getNumThreads\n */\nCV_EXPORTS_W int getThreadNum();\n\n/** @brief Returns full configuration time cmake output.\n\nThe returned value is raw cmake output including version control system revision, compiler version,\ncompiler flags, enabled modules and third party libraries, etc. The output format depends on the target\narchitecture.\n */\nCV_EXPORTS_W const String& getBuildInformation();\n\n/** @brief Returns the number of ticks.\n\nThe function returns the number of ticks after a certain event (for example, when the machine was\nturned on). It can be used to initialize an RNG or to measure function execution time by reading the\ntick count before and after the function call. See also the tick frequency.\n */\nCV_EXPORTS_W int64 getTickCount();\n\n/** @brief Returns the number of ticks per second.\n\nThe function returns the number of ticks per second. That is, the following code computes the\nexecution time in seconds:\n@code\n    double t = (double)getTickCount();\n    // do something ...\n    t = ((double)getTickCount() - t)/getTickFrequency();\n@endcode\n */\nCV_EXPORTS_W double getTickFrequency();\n\n/** @brief Returns the number of CPU ticks.\n\nThe function returns the current number of CPU ticks on some architectures (such as x86, x64,\nPowerPC). On other platforms the function is equivalent to getTickCount. It can also be used for\nvery accurate time measurements, as well as for RNG initialization. Note that on multi-CPU\nsystems a thread from which getCPUTickCount is called can be suspended and resumed on another CPU\nwith its own counter. So, theoretically (and practically), subsequent calls to the function do\nnot necessarily return monotonically increasing values. Also, since a modern CPU varies the CPU\nfrequency depending on the load, the number of CPU clocks spent in some code cannot be directly\nconverted to time units. 
Therefore, getTickCount is generally a preferable solution for measuring\nexecution time.\n */\nCV_EXPORTS_W int64 getCPUTickCount();\n\n/** @brief Returns true if the specified feature is supported by the host hardware.\n\nThe function returns true if the host hardware supports the specified feature. When the user calls\nsetUseOptimized(false), the subsequent calls to checkHardwareSupport() will return false until\nsetUseOptimized(true) is called. This way the user can dynamically switch on and off the optimized code\nin OpenCV.\n@param feature The feature of interest, one of cv::CpuFeatures\n */\nCV_EXPORTS_W bool checkHardwareSupport(int feature);\n\n/** @brief Returns the number of logical CPUs available to the process.\n */\nCV_EXPORTS_W int getNumberOfCPUs();\n\n\n/** @brief Aligns a pointer to the specified number of bytes.\n\nThe function returns the aligned pointer of the same type as the input pointer:\n\\f[\\texttt{(_Tp*)(((size_t)ptr + n-1) & -n)}\\f]\n@param ptr The pointer to align.\n@param n Alignment size that must be a power of two.\n */\ntemplate<typename _Tp> static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp))\n{\n    return (_Tp*)(((size_t)ptr + n-1) & -n);\n}\n\n/** @brief Aligns a buffer size to the specified number of bytes.\n\nThe function returns the minimum number that is greater than or equal to sz and is divisible by n :\n\\f[\\texttt{(sz + n-1) & -n}\\f]\n@param sz Buffer size to align.\n@param n Alignment size that must be a power of two.\n */\nstatic inline size_t alignSize(size_t sz, int n)\n{\n    CV_DbgAssert((n & (n - 1)) == 0); // n is a power of 2\n    return (sz + n-1) & -n;\n}\n\n/** @brief Enables or disables the optimized code.\n\nThe function can be used to dynamically turn on and off optimized code (code that uses SSE2, AVX,\nand other instructions on the platforms that support it). It sets a global flag that is further\nchecked by OpenCV functions. Since the flag is not checked in the inner OpenCV loops, it is only\nsafe to call the function at the very top level in your application where you can be sure that no\nother OpenCV function is currently executing.\n\nBy default, the optimized code is enabled unless you disable it in CMake. The current status can be\nretrieved using useOptimized.\n@param onoff The boolean flag specifying whether the optimized code should be used (onoff=true)\nor not (onoff=false).\n */\nCV_EXPORTS_W void setUseOptimized(bool onoff);\n\n/** @brief Returns the status of optimized code usage.\n\nThe function returns true if the optimized code is enabled. 
Otherwise, it returns false.\n */\nCV_EXPORTS_W bool useOptimized();\n\nstatic inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); }\n\n/////////////////////////////// Parallel Primitives //////////////////////////////////\n\n/** @brief Base class for parallel data processors\n*/\nclass CV_EXPORTS ParallelLoopBody\n{\npublic:\n    virtual ~ParallelLoopBody();\n    virtual void operator() (const Range& range) const = 0;\n};\n\n/** @brief Parallel data processor\n*/\nCV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.);\n\n/////////////////////////////// forEach method of cv::Mat ////////////////////////////\ntemplate<typename _Tp, typename Functor> inline\nvoid Mat::forEach_impl(const Functor& operation) {\n    if (false) {\n        operation(*reinterpret_cast<_Tp*>(0), reinterpret_cast<int*>(NULL));\n        // If your compiler fails on this line,\n        // please check that your functor signature is\n        //     (_Tp&, const int*)   <- multidimensional\n        //  or (_Tp&, void*)        <- in case you don't need the current idx.\n    }\n\n    CV_Assert(this->total() / this->size[this->dims - 1] <= INT_MAX);\n    const int LINES = static_cast<int>(this->total() / this->size[this->dims - 1]);\n\n    class PixelOperationWrapper :public ParallelLoopBody\n    {\n    public:\n        PixelOperationWrapper(Mat_<_Tp>* const frame, const Functor& _operation)\n            : mat(frame), op(_operation) {};\n        virtual ~PixelOperationWrapper(){};\n        // ! Overloaded virtual operator\n        // converts a range call to row calls.\n        virtual void operator()(const Range &range) const {\n            const int DIMS = mat->dims;\n            const int COLS = mat->size[DIMS - 1];\n            if (DIMS <= 2) {\n                for (int row = range.start; row < range.end; ++row) {\n                    this->rowCall2(row, COLS);\n                }\n            } else {\n                std::vector<int> idx(DIMS); /// idx is modified in this->rowCall\n                idx[DIMS - 2] = range.start - 1;\n\n                for (int line_num = range.start; line_num < range.end; ++line_num) {\n                    idx[DIMS - 2]++;\n                    for (int i = DIMS - 2; i >= 0; --i) {\n                        if (idx[i] >= mat->size[i]) {\n                            idx[i - 1] += idx[i] / mat->size[i];\n                            idx[i] %= mat->size[i];\n                            continue; // carry-over;\n                        }\n                        else {\n                            break;\n                        }\n                    }\n                    this->rowCall(&idx[0], COLS, DIMS);\n                }\n            }\n        };\n    private:\n        Mat_<_Tp>* const mat;\n        const Functor op;\n        // ! Call operator for each element in this row.\n        inline void rowCall(int* const idx, const int COLS, const int DIMS) const {\n            int &col = idx[DIMS - 1];\n            col = 0;\n            _Tp* pixel = &(mat->template at<_Tp>(idx));\n\n            while (col < COLS) {\n                op(*pixel, const_cast<const int*>(idx));\n                pixel++; col++;\n            }\n            col = 0;\n        }\n        // ! Call operator for each element in this row. 
2D Mat special version.\n        inline void rowCall2(const int row, const int COLS) const {\n            union Index{\n                int body[2];\n                operator const int*() const {\n                    return reinterpret_cast<const int*>(this);\n                }\n                int& operator[](const int i) {\n                    return body[i];\n                }\n            } idx = {{row, 0}};\n            // Special union is needed to avoid\n            // \"error: array subscript is above array bounds [-Werror=array-bounds]\"\n            // when the functor `op` is called in a way that accesses idx[3].\n\n            _Tp* pixel = &(mat->template at<_Tp>(idx));\n            const _Tp* const pixel_end = pixel + COLS;\n            while(pixel < pixel_end) {\n                op(*pixel++, static_cast<const int*>(idx));\n                idx[1]++;\n            }\n        };\n        PixelOperationWrapper& operator=(const PixelOperationWrapper &) {\n            CV_Assert(false);\n            // We cannot remove this implementation because of Visual Studio warning C4822.\n            return *this;\n        };\n    };\n\n    parallel_for_(cv::Range(0, LINES), PixelOperationWrapper(reinterpret_cast<Mat_<_Tp>*>(this), operation));\n}\n\n/////////////////////////// Synchronization Primitives ///////////////////////////////\n\nclass CV_EXPORTS Mutex\n{\npublic:\n    Mutex();\n    ~Mutex();\n    Mutex(const Mutex& m);\n    Mutex& operator = (const Mutex& m);\n\n    void lock();\n    bool trylock();\n    void unlock();\n\n    struct Impl;\nprotected:\n    Impl* impl;\n};\n\nclass CV_EXPORTS AutoLock\n{\npublic:\n    AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); }\n    ~AutoLock() { mutex->unlock(); }\nprotected:\n    Mutex* mutex;\nprivate:\n    AutoLock(const AutoLock&);\n    AutoLock& operator = (const AutoLock&);\n};\n\n// TLS interface\nclass CV_EXPORTS TLSDataContainer\n{\nprotected:\n    TLSDataContainer();\n    virtual ~TLSDataContainer();\n\n    void  gatherData(std::vector<void*> &data) const;\n#if OPENCV_ABI_COMPATIBILITY > 300\n    void* getData() const;\n    void  release();\n\nprivate:\n#else\n    void  release();\n\npublic:\n    void* getData() const;\n#endif\n    virtual void* createDataInstance() const = 0;\n    virtual void  deleteDataInstance(void* pData) const = 0;\n\n    int key_;\n};\n\n// Main TLS data class\ntemplate <typename T>\nclass TLSData : protected TLSDataContainer\n{\npublic:\n    inline TLSData()        {}\n    inline ~TLSData()       { release();            } // Release key and delete associated data\n    inline T* get() const   { return (T*)getData(); } // Get data associated with the key\n\n    // Get data from all threads\n    inline void gather(std::vector<T*> &data) const\n    {\n        std::vector<void*> &dataVoid = reinterpret_cast<std::vector<void*>&>(data);\n        gatherData(dataVoid);\n    }\n\nprivate:\n    virtual void* createDataInstance() const {return new T;}                // Wrapper to allocate data by template\n    virtual void  deleteDataInstance(void* pData) const {delete (T*)pData;} // Wrapper to release data by template\n\n    // Disable TLS copy operations\n    TLSData(TLSData &) {};\n    TLSData& operator =(const TLSData &) {return *this;};\n};\n\n/** @brief Designed for command line parsing\n\nThe sample below demonstrates how to use CommandLineParser:\n@code\n    CommandLineParser parser(argc, argv, keys);\n    parser.about(\"Application name v1.0.0\");\n\n    if (parser.has(\"help\"))\n    {\n        parser.printMessage();\n        
return 0;\n    }\n\n    int N = parser.get<int>(\"N\");\n    double fps = parser.get<double>(\"fps\");\n    String path = parser.get<String>(\"path\");\n\n    use_time_stamp = parser.has(\"timestamp\");\n\n    String img1 = parser.get<String>(0);\n    String img2 = parser.get<String>(1);\n\n    int repeat = parser.get<int>(2);\n\n    if (!parser.check())\n    {\n        parser.printErrors();\n        return 0;\n    }\n@endcode\n\n### Keys syntax\n\nThe keys parameter is a string containing several blocks, each one enclosed in curly braces and\ndescribing one argument. Each argument contains three parts separated by the `|` symbol:\n\n-# argument names is a space-separated list of option synonyms (to mark an argument as positional, prefix it with the `@` symbol)\n-# default value will be used if the argument was not provided (can be empty)\n-# help message (can be empty)\n\nFor example:\n\n@code{.cpp}\n    const String keys =\n        \"{help h usage ? |      | print this message   }\"\n        \"{@image1        |      | image1 for compare   }\"\n        \"{@image2        |<none>| image2 for compare   }\"\n        \"{@repeat        |1     | number               }\"\n        \"{path           |.     | path to file         }\"\n        \"{fps            | -1.0 | fps for output video }\"\n        \"{N count        |100   | count of objects     }\"\n        \"{ts timestamp   |      | use time stamp       }\"\n        ;\n@endcode\n\nNote that there are no default values for `help` and `timestamp` so we can check their presence using the `has()` method.\nArguments with default values are considered to be always present. Use the `get()` method in these cases to check their\nactual value instead.\n\nString keys like `get<String>(\"@image1\")` return the empty string `\"\"` by default - even with an empty default value.\nUse the special `<none>` default value to enforce that the returned string must not be empty. (as in `get<String>(\"@image2\")`)\n\n### Usage\n\nFor the described keys:\n\n@code{.sh}\n    # Good call (3 positional parameters: image1, image2 and repeat; N is 200, ts is true)\n    $ ./app -N=200 1.png 2.jpg 19 -ts\n\n    # Bad call\n    $ ./app -fps=aaa\n    ERRORS:\n    Parameter 'fps': can not convert: [aaa] to [double]\n@endcode\n */\nclass CV_EXPORTS CommandLineParser\n{\npublic:\n\n    /** @brief Constructor\n\n    Initializes the command line parser object\n\n    @param argc number of command line arguments (from main())\n    @param argv array of command line arguments (from main())\n    @param keys string describing acceptable command line parameters (see class description for syntax)\n    */\n    CommandLineParser(int argc, const char* const argv[], const String& keys);\n\n    /** @brief Copy constructor */\n    CommandLineParser(const CommandLineParser& parser);\n\n    /** @brief Assignment operator */\n    CommandLineParser& operator = (const CommandLineParser& parser);\n\n    /** @brief Destructor */\n    ~CommandLineParser();\n\n    /** @brief Returns application path\n\n    This method returns the path to the executable from the command line (`argv[0]`).\n\n    For example, if the application has been started with the following command:\n    @code{.sh}\n    $ ./bin/my-executable\n    @endcode\n    this method will return `./bin`.\n    */\n    String getPathToApplication() const;\n\n    /** @brief Access arguments by name\n\n    Returns argument converted to selected type. 
If the argument is not known or cannot be\n    converted to the selected type, the error flag is set (can be checked with @ref check).\n\n    For example, define:\n    @code{.cpp}\n    String keys = \"{N count||}\";\n    @endcode\n\n    Call:\n    @code{.sh}\n    $ ./my-app -N=20\n    # or\n    $ ./my-app --count=20\n    @endcode\n\n    Access:\n    @code{.cpp}\n    int N = parser.get<int>(\"N\");\n    @endcode\n\n    @param name name of the argument\n    @param space_delete remove spaces from the left and right of the string\n    @tparam T the argument will be converted to this type if possible\n\n    @note You can access positional arguments by their `@`-prefixed name:\n    @code{.cpp}\n    parser.get<String>(\"@image\");\n    @endcode\n     */\n    template <typename T>\n    T get(const String& name, bool space_delete = true) const\n    {\n        T val = T();\n        getByName(name, space_delete, ParamType<T>::type, (void*)&val);\n        return val;\n    }\n\n    /** @brief Access positional arguments by index\n\n    Returns argument converted to selected type. Indexes are counted from zero.\n\n    For example, define:\n    @code{.cpp}\n    String keys = \"{@arg1||}{@arg2||}\";\n    @endcode\n\n    Call:\n    @code{.sh}\n    ./my-app abc qwe\n    @endcode\n\n    Access arguments:\n    @code{.cpp}\n    String val_1 = parser.get<String>(0); // returns \"abc\", arg1\n    String val_2 = parser.get<String>(1); // returns \"qwe\", arg2\n    @endcode\n\n    @param index index of the argument\n    @param space_delete remove spaces from the left and right of the string\n    @tparam T the argument will be converted to this type if possible\n     */\n    template <typename T>\n    T get(int index, bool space_delete = true) const\n    {\n        T val = T();\n        getByIndex(index, space_delete, ParamType<T>::type, (void*)&val);\n        return val;\n    }\n\n    /** @brief Check if a field was provided in the command line\n\n    @param name argument name to check\n    */\n    bool has(const String& name) const;\n\n    /** @brief Check for parsing errors\n\n    Returns true if an error occurred while accessing the parameters (bad conversion, missing arguments,\n    etc.). Call @ref printErrors to print the list of error messages.\n     */\n    bool check() const;\n\n    /** @brief Set the about message\n\n    The about message will be shown when @ref printMessage is called, right before the arguments table.\n     */\n    void about(const String& message);\n\n    /** @brief Print help message\n\n    This method will print the standard help message containing the about message and the arguments description.\n\n    @sa about\n    */\n    void printMessage() const;\n\n    /** @brief Print the list of errors that occurred\n\n    @sa check\n    */\n    void printErrors() const;\n\nprotected:\n    void getByName(const String& name, bool space_delete, int type, void* dst) const;\n    void getByIndex(int index, bool space_delete, int type, void* dst) const;\n\n    struct Impl;\n    Impl* impl;\n};\n\n//! @} core_utils\n\n//! 
@cond IGNORED\n\n/////////////////////////////// AutoBuffer implementation ////////////////////////////////////////\n\ntemplate<typename _Tp, size_t fixed_size> inline\nAutoBuffer<_Tp, fixed_size>::AutoBuffer()\n{\n    ptr = buf;\n    sz = fixed_size;\n}\n\ntemplate<typename _Tp, size_t fixed_size> inline\nAutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size)\n{\n    ptr = buf;\n    sz = fixed_size;\n    allocate(_size);\n}\n\ntemplate<typename _Tp, size_t fixed_size> inline\nAutoBuffer<_Tp, fixed_size>::AutoBuffer(const AutoBuffer<_Tp, fixed_size>& abuf )\n{\n    ptr = buf;\n    sz = fixed_size;\n    allocate(abuf.size());\n    for( size_t i = 0; i < sz; i++ )\n        ptr[i] = abuf.ptr[i];\n}\n\ntemplate<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>&\nAutoBuffer<_Tp, fixed_size>::operator = (const AutoBuffer<_Tp, fixed_size>& abuf)\n{\n    if( this != &abuf )\n    {\n        deallocate();\n        allocate(abuf.size());\n        for( size_t i = 0; i < sz; i++ )\n            ptr[i] = abuf.ptr[i];\n    }\n    return *this;\n}\n\ntemplate<typename _Tp, size_t fixed_size> inline\nAutoBuffer<_Tp, fixed_size>::~AutoBuffer()\n{ deallocate(); }\n\ntemplate<typename _Tp, size_t fixed_size> inline void\nAutoBuffer<_Tp, fixed_size>::allocate(size_t _size)\n{\n    if(_size <= sz)\n    {\n        sz = _size;\n        return;\n    }\n    deallocate();\n    if(_size > fixed_size)\n    {\n        ptr = new _Tp[_size];\n        sz = _size;\n    }\n}\n\ntemplate<typename _Tp, size_t fixed_size> inline void\nAutoBuffer<_Tp, fixed_size>::deallocate()\n{\n    if( ptr != buf )\n    {\n        delete[] ptr;\n        ptr = buf;\n        sz = fixed_size;\n    }\n}\n\ntemplate<typename _Tp, size_t fixed_size> inline void\nAutoBuffer<_Tp, fixed_size>::resize(size_t _size)\n{\n    if(_size <= sz)\n    {\n        sz = _size;\n        return;\n    }\n    size_t i, prevsize = sz, minsize = MIN(prevsize, _size);\n    _Tp* prevptr = ptr;\n\n    ptr = _size > fixed_size ? new _Tp[_size] : buf;\n    sz = _size;\n\n    if( ptr != prevptr )\n        for( i = 0; i < minsize; i++ )\n            ptr[i] = prevptr[i];\n    for( i = prevsize; i < _size; i++ )\n        ptr[i] = _Tp();\n\n    if( prevptr != buf )\n        delete[] prevptr;\n}\n\ntemplate<typename _Tp, size_t fixed_size> inline size_t\nAutoBuffer<_Tp, fixed_size>::size() const\n{ return sz; }\n\ntemplate<typename _Tp, size_t fixed_size> inline\nAutoBuffer<_Tp, fixed_size>::operator _Tp* ()\n{ return ptr; }\n\ntemplate<typename _Tp, size_t fixed_size> inline\nAutoBuffer<_Tp, fixed_size>::operator const _Tp* () const\n{ return ptr; }\n\n#ifndef OPENCV_NOSTL\ntemplate<> inline std::string CommandLineParser::get<std::string>(int index, bool space_delete) const\n{\n    return get<String>(index, space_delete);\n}\ntemplate<> inline std::string CommandLineParser::get<std::string>(const String& name, bool space_delete) const\n{\n    return get<String>(name, space_delete);\n}\n#endif // OPENCV_NOSTL\n\n//! @endcond\n\n} //namespace cv\n\n#ifndef DISABLE_OPENCV_24_COMPATIBILITY\n#include \"opencv2/core/core_c.h\"\n#endif\n\n#endif //__OPENCV_CORE_UTILITY_H__\n"
  },
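  {
    "path": "doc/examples/utility_parallel_for_example.cpp",
    "content": "// Illustrative example only: this file and its path are hypothetical additions,\n// not part of upstream OpenCV or this project. It is a minimal sketch of the\n// cv::parallel_for_ / cv::ParallelLoopBody and cv::CommandLineParser APIs\n// documented in utility.hpp above; the \"N count\" key is an assumption made for\n// this example.\n#include <cstdio>\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/core/utility.hpp\"\n\n// ParallelLoopBody subclass: operator() receives a sub-range of [0, n).\nclass SquareBody : public cv::ParallelLoopBody\n{\npublic:\n    explicit SquareBody(std::vector<double>& d) : data(d) {}\n    virtual void operator()(const cv::Range& range) const\n    {\n        for (int i = range.start; i < range.end; ++i)\n            data[i] *= data[i]; // each index is visited exactly once\n    }\nprivate:\n    std::vector<double>& data;\n};\n\nint main(int argc, char* argv[])\n{\n    const cv::String keys =\n        \"{help h usage ? |    | print this message }\"\n        \"{N count        |100 | number of elements }\";\n    cv::CommandLineParser parser(argc, argv, keys);\n    if (parser.has(\"help\")) { parser.printMessage(); return 0; }\n\n    const int n = parser.get<int>(\"N\");\n    if (!parser.check()) { parser.printErrors(); return 1; }\n\n    std::vector<double> v(n, 2.0);\n    // The framework splits [0, n) into stripes and may run them in parallel.\n    cv::parallel_for_(cv::Range(0, n), SquareBody(v));\n    std::printf(\"v[0] = %g\\n\", v[0]);\n    return 0;\n}\n"
  },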
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/va_intel.hpp",
    "content": "// This file is part of OpenCV project.\n// It is subject to the license terms in the LICENSE file found in the top-level directory\n// of this distribution and at http://opencv.org/license.html.\n\n// Copyright (C) 2015, Itseez, Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n\n#ifndef __OPENCV_CORE_VA_INTEL_HPP__\n#define __OPENCV_CORE_VA_INTEL_HPP__\n\n#ifndef __cplusplus\n#  error va_intel.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core.hpp\"\n#include \"ocl.hpp\"\n\n#if defined(HAVE_VA)\n# include \"va/va.h\"\n#else  // HAVE_VA\n# if !defined(_VA_H_)\n    typedef void* VADisplay;\n    typedef unsigned int VASurfaceID;\n# endif // !_VA_H_\n#endif // HAVE_VA\n\nnamespace cv { namespace va_intel {\n\n/** @addtogroup core_va_intel\nThis section describes Intel VA-API/OpenCL (CL-VA) interoperability.\n\nTo enable CL-VA interoperability support, configure OpenCV using CMake with WITH_VA_INTEL=ON . Currently VA-API is\nsupported on Linux only. You should also install Intel Media Server Studio (MSS) to use this feature. You may\nhave to specify the path(s) to MSS components for cmake in environment variables: VA_INTEL_MSDK_ROOT for Media SDK\n(default is \"/opt/intel/mediasdk\"), and VA_INTEL_IOCL_ROOT for Intel OpenCL (default is \"/opt/intel/opencl\").\n\nTo use CL-VA interoperability you should first create VADisplay (libva), and then call initializeContextFromVA()\nfunction to create OpenCL context and set up interoperability.\n*/\n//! @{\n\n/////////////////// CL-VA Interoperability Functions ///////////////////\n\nnamespace ocl {\nusing namespace cv::ocl;\n\n// TODO static functions in the Context class\n/** @brief Creates OpenCL context from VA.\n@param display    - VADisplay for which CL interop should be established.\n@param tryInterop - try to set up for interoperability, if true; set up for use slow copy if false.\n@return Returns reference to OpenCL Context\n */\nCV_EXPORTS Context& initializeContextFromVA(VADisplay display, bool tryInterop = true);\n\n} // namespace cv::va_intel::ocl\n\n/** @brief Converts InputArray to VASurfaceID object.\n@param display - VADisplay object.\n@param src     - source InputArray.\n@param surface - destination VASurfaceID object.\n@param size    - size of image represented by VASurfaceID object.\n */\nCV_EXPORTS void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface, Size size);\n\n/** @brief Converts VASurfaceID object to OutputArray.\n@param display - VADisplay object.\n@param surface - source VASurfaceID object.\n@param size    - size of image represented by VASurfaceID object.\n@param dst     - destination OutputArray.\n */\nCV_EXPORTS void convertFromVASurface(VADisplay display, VASurfaceID surface, Size size, OutputArray dst);\n\n//! @}\n\n}} // namespace cv::va_intel\n\n#endif /* __OPENCV_CORE_VA_INTEL_HPP__ */\n"
  },
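  {
    "path": "doc/examples/va_intel_interop_sketch.cpp",
    "content": "// Hedged sketch only: this file and its path are hypothetical, not part of\n// upstream OpenCV or this project. It wires together the CL-VA interop calls\n// declared in va_intel.hpp above, assuming a Linux build with HAVE_VA.\n// Obtaining the VADisplay and the VASurfaceID is libva work that is assumed\n// to happen elsewhere; processSurface is a hypothetical name for this sketch.\n#include \"opencv2/core.hpp\"\n#include \"opencv2/core/va_intel.hpp\"\n\nvoid processSurface(VADisplay display, VASurfaceID surface, cv::Size size)\n{\n    // Create an OpenCL context tied to the VA display (normally done once per\n    // display, not per frame).\n    cv::va_intel::ocl::initializeContextFromVA(display, /*tryInterop=*/true);\n\n    // Map the VA surface into a cv::Mat, process it, and write it back.\n    cv::Mat frame;\n    cv::va_intel::convertFromVASurface(display, surface, size, frame);\n    frame = cv::Scalar::all(0); // placeholder for real processing\n    cv::va_intel::convertToVASurface(display, frame, surface, size);\n}\n"
  },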
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/version.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright( C) 2000-2015, Intel Corporation, all rights reserved.\n// Copyright (C) 2011-2013, NVIDIA Corporation, all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n//(including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort(including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n/*\n  definition of the current version of OpenCV\n  Usefull to test in user programs\n*/\n\n#ifndef __OPENCV_VERSION_HPP__\n#define __OPENCV_VERSION_HPP__\n\n#define CV_VERSION_MAJOR    3\n#define CV_VERSION_MINOR    1\n#define CV_VERSION_REVISION 0\n#define CV_VERSION_STATUS   \"\"\n\n#define CVAUX_STR_EXP(__A)  #__A\n#define CVAUX_STR(__A)      CVAUX_STR_EXP(__A)\n\n#define CVAUX_STRW_EXP(__A)  L#__A\n#define CVAUX_STRW(__A)      CVAUX_STRW_EXP(__A)\n\n#define CV_VERSION          CVAUX_STR(CV_VERSION_MAJOR) \".\" CVAUX_STR(CV_VERSION_MINOR) \".\" CVAUX_STR(CV_VERSION_REVISION) CV_VERSION_STATUS\n\n/* old  style version constants*/\n#define CV_MAJOR_VERSION    CV_VERSION_MAJOR\n#define CV_MINOR_VERSION    CV_VERSION_MINOR\n#define CV_SUBMINOR_VERSION CV_VERSION_REVISION\n\n#endif\n"
  },
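  {
    "path": "doc/examples/version_check_example.cpp",
    "content": "// Small illustrative example (this file and its path are hypothetical, not\n// part of upstream OpenCV or this project): the macros from version.hpp can\n// be used both at run time (CV_VERSION is a string like \"3.1.0\") and at\n// compile time to guard version-specific code paths.\n#include <cstdio>\n#include \"opencv2/core/version.hpp\"\n\nint main()\n{\n    std::printf(\"Built against OpenCV %s\\n\", CV_VERSION);\n#if CV_VERSION_MAJOR >= 3\n    std::printf(\"Using a 3.x-only code path\\n\");\n#endif\n    return 0;\n}\n"
  },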
  {
    "path": "src/3rdparty/opencv/include/opencv2/core/wimage.hpp",
    "content": "/*M//////////////////////////////////////////////////////////////////////////////\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to\n//  this license.  If you do not agree to this license, do not download,\n//  install, copy or use the software.\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2008, Google, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n//  * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//  * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//  * The name of Intel Corporation or contributors may not be used to endorse\n//     or promote products derived from this software without specific\n//     prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\"\n// and any express or implied warranties, including, but not limited to, the\n// implied warranties of merchantability and fitness for a particular purpose\n// are disclaimed. In no event shall the Intel Corporation or contributors be\n// liable for any direct, indirect, incidental, special, exemplary, or\n// consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n/////////////////////////////////////////////////////////////////////////////////\n//M*/\n\n#ifndef __OPENCV_CORE_WIMAGE_HPP__\n#define __OPENCV_CORE_WIMAGE_HPP__\n\n#include \"opencv2/core/core_c.h\"\n\n#ifdef __cplusplus\n\nnamespace cv {\n\n//! @addtogroup core\n//! 
@{\n\ntemplate <typename T> class WImage;\ntemplate <typename T> class WImageBuffer;\ntemplate <typename T> class WImageView;\n\ntemplate<typename T, int C> class WImageC;\ntemplate<typename T, int C> class WImageBufferC;\ntemplate<typename T, int C> class WImageViewC;\n\n// Commonly used typedefs.\ntypedef WImage<uchar>            WImage_b;\ntypedef WImageView<uchar>        WImageView_b;\ntypedef WImageBuffer<uchar>      WImageBuffer_b;\n\ntypedef WImageC<uchar, 1>        WImage1_b;\ntypedef WImageViewC<uchar, 1>    WImageView1_b;\ntypedef WImageBufferC<uchar, 1>  WImageBuffer1_b;\n\ntypedef WImageC<uchar, 3>        WImage3_b;\ntypedef WImageViewC<uchar, 3>    WImageView3_b;\ntypedef WImageBufferC<uchar, 3>  WImageBuffer3_b;\n\ntypedef WImage<float>            WImage_f;\ntypedef WImageView<float>        WImageView_f;\ntypedef WImageBuffer<float>      WImageBuffer_f;\n\ntypedef WImageC<float, 1>        WImage1_f;\ntypedef WImageViewC<float, 1>    WImageView1_f;\ntypedef WImageBufferC<float, 1>  WImageBuffer1_f;\n\ntypedef WImageC<float, 3>        WImage3_f;\ntypedef WImageViewC<float, 3>    WImageView3_f;\ntypedef WImageBufferC<float, 3>  WImageBuffer3_f;\n\n// There isn't a standard for signed and unsigned short so be more\n// explicit in the typename for these cases.\ntypedef WImage<short>            WImage_16s;\ntypedef WImageView<short>        WImageView_16s;\ntypedef WImageBuffer<short>      WImageBuffer_16s;\n\ntypedef WImageC<short, 1>        WImage1_16s;\ntypedef WImageViewC<short, 1>    WImageView1_16s;\ntypedef WImageBufferC<short, 1>  WImageBuffer1_16s;\n\ntypedef WImageC<short, 3>        WImage3_16s;\ntypedef WImageViewC<short, 3>    WImageView3_16s;\ntypedef WImageBufferC<short, 3>  WImageBuffer3_16s;\n\ntypedef WImage<ushort>            WImage_16u;\ntypedef WImageView<ushort>        WImageView_16u;\ntypedef WImageBuffer<ushort>      WImageBuffer_16u;\n\ntypedef WImageC<ushort, 1>        WImage1_16u;\ntypedef WImageViewC<ushort, 1>    WImageView1_16u;\ntypedef WImageBufferC<ushort, 1>  WImageBuffer1_16u;\n\ntypedef WImageC<ushort, 3>        WImage3_16u;\ntypedef WImageViewC<ushort, 3>    WImageView3_16u;\ntypedef WImageBufferC<ushort, 3>  WImageBuffer3_16u;\n\n/** @brief Image class which provides a thin layer around an IplImage.\n\nThe goals of the class design are:\n\n    -# All the data has explicit ownership to avoid memory leaks\n    -# No hidden allocations or copies for performance.\n    -# Easy access to OpenCV methods (which will access IPP if available)\n    -# Can easily treat external data as an image\n    -# Easy to create images which are subsets of other images\n    -# Fast pixel access which can take advantage of number of channels if known at compile time.\n\nThe WImage class is the image class which provides the data accessors. The 'W' comes from the fact\nthat it is also a wrapper around the popular but inconvenient IplImage class. A WImage can be\nconstructed either using a WImageBuffer class which allocates and frees the data, or using a\nWImageView class which constructs a subimage or a view into external data. The view class does no\nmemory management. Each class actually has two versions, one when the number of channels is known\nat compile time and one when it isn't. 
Using the one with the number of channels specified can\nprovide some compile time optimizations by using the fact that the number of channels is a\nconstant.\n\nWe use the convention (c,r) to refer to column c and row r with (0,0) being the upper left corner.\nThis is similar to standard Euclidean coordinates with the first coordinate varying in the\nhorizontal direction and the second coordinate varying in the vertical direction. Thus (c,r) is\nusually in the domain [0, width) X [0, height).\n\nExample usage:\n@code\nWImageBuffer3_b  im(5,7);  // Make a 5X7 3 channel image of type uchar\nWImageView3_b  sub_im(im, 2,2, 3,3); // 3X3 submatrix\nvector<float> vec(10, 3.0f);\nWImageView1_f user_im(&vec[0], 2, 5);  // 2X5 image w/ supplied data\n\nim.SetZero();  // same as cvSetZero(im.Ipl())\n*im(2, 3) = 15;  // Modify the element at column 2, row 3\nMySetRand(&sub_im);\n\n// Copy the second row into the first.  This can be done with no memory\n// allocation and will use SSE if IPP is available.\nint w = im.Width();\nim.View(0,0, w,1).CopyFrom(im.View(0,1, w,1));\n\n// Doesn't care about source of data since using WImage\nvoid MySetRand(WImage_b* im) { // Works with any number of channels\n  for (int r = 0; r < im->Height(); ++r) {\n    uchar* row = im->Row(r);\n    for (int c = 0; c < im->Width(); ++c) {\n      for (int ch = 0; ch < im->Channels(); ++ch, ++row) {\n        *row = uchar(rand() & 255);\n      }\n    }\n  }\n}\n@endcode\n\nFunctions that are not part of the basic image allocation, viewing, and access should come from\nOpenCV; some useful functions that are not part of OpenCV can be found in wimage_util.h.\n*/\ntemplate<typename T>\nclass WImage\n{\npublic:\n    typedef T BaseType;\n\n    // WImage is an abstract class with no other virtual methods so make the\n    // destructor virtual.\n    virtual ~WImage() = 0;\n\n    // Accessors\n    IplImage* Ipl() {return image_; }\n    const IplImage* Ipl() const {return image_; }\n    T* ImageData() { return reinterpret_cast<T*>(image_->imageData); }\n    const T* ImageData() const {\n        return reinterpret_cast<const T*>(image_->imageData);\n    }\n\n    int Width() const {return image_->width; }\n    int Height() const {return image_->height; }\n\n    // WidthStep is the number of bytes to go to the pixel with the next y coord\n    int WidthStep() const {return image_->widthStep; }\n\n    int Channels() const {return image_->nChannels; }\n    int ChannelSize() const {return sizeof(T); }  // number of bytes per channel\n\n    // Number of bytes per pixel\n    int PixelSize() const {return Channels() * ChannelSize(); }\n\n    // Return depth type (e.g. 
IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number\n    // of bits per channel and with the signed bit set.\n    // This is known at compile time using specializations.\n    int Depth() const;\n\n    inline const T* Row(int r) const {\n        return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep);\n    }\n\n    inline T* Row(int r) {\n        return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep);\n    }\n\n    // Pixel accessors which return a pointer to the start of the channel\n    inline T* operator() (int c, int r)  {\n        return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep) +\n            c*Channels();\n    }\n\n    inline const T* operator() (int c, int r) const  {\n        return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep) +\n            c*Channels();\n    }\n\n    // Copy the contents from another image; this is just a convenience wrapper for cvCopy\n    void CopyFrom(const WImage<T>& src) { cvCopy(src.Ipl(), image_); }\n\n    // Set contents to zero; this is just a convenience wrapper for cvSetZero\n    void SetZero() { cvSetZero(image_); }\n\n    // Construct a view into a region of this image\n    WImageView<T> View(int c, int r, int width, int height);\n\nprotected:\n    // Disallow copy and assignment\n    WImage(const WImage&);\n    void operator=(const WImage&);\n\n    explicit WImage(IplImage* img) : image_(img) {\n        assert(!img || img->depth == Depth());\n    }\n\n    void SetIpl(IplImage* image) {\n        assert(!image || image->depth == Depth());\n        image_ = image;\n    }\n\n    IplImage* image_;\n};\n\n\n/** Image class when both the pixel type and number of channels\nare known at compile time.  This wrapper will speed up some of the operations\nlike accessing individual pixels using the () operator.\n*/\ntemplate<typename T, int C>\nclass WImageC : public WImage<T>\n{\npublic:\n    typedef typename WImage<T>::BaseType BaseType;\n    enum { kChannels = C };\n\n    explicit WImageC(IplImage* img) : WImage<T>(img) {\n        assert(!img || img->nChannels == Channels());\n    }\n\n    // Construct a view into a region of this image\n    WImageViewC<T, C> View(int c, int r, int width, int height);\n\n    // Copy the contents from another image; this is just a convenience wrapper for cvCopy\n    void CopyFrom(const WImageC<T, C>& src) {\n        cvCopy(src.Ipl(), WImage<T>::image_);\n    }\n\n    // WImageC is an abstract class with no other virtual methods so make the\n    // destructor virtual.\n    virtual ~WImageC() = 0;\n\n    int Channels() const {return C; }\n\nprotected:\n    // Disallow copy and assignment\n    WImageC(const WImageC&);\n    void operator=(const WImageC&);\n\n    void SetIpl(IplImage* image) {\n        assert(!image || image->depth == WImage<T>::Depth());\n        WImage<T>::SetIpl(image);\n    }\n};\n\n/** Image class which owns the data, so it can be allocated and is always\nfreed.  It cannot be copied but can be explicitly cloned.\n*/\ntemplate<typename T>\nclass WImageBuffer : public WImage<T>\n{\npublic:\n    typedef typename WImage<T>::BaseType BaseType;\n\n    // Default constructor which creates an object that can be allocated later\n    // using Allocate() or SetIpl().\n    WImageBuffer() : WImage<T>(0) {}\n\n    WImageBuffer(int width, int height, int nchannels) : WImage<T>(0) {\n        Allocate(width, height, nchannels);\n    }\n\n    // Constructor which takes ownership of a given IplImage so releases\n    // the image on destruction.\n    explicit WImageBuffer(IplImage* img) : WImage<T>(img) {}\n\n    // Allocate an image.  
Does nothing if current size is the same as\n    // the new size.\n    void Allocate(int width, int height, int nchannels);\n\n    // Set the data to point to an image, releasing the old data\n    void SetIpl(IplImage* img) {\n        ReleaseImage();\n        WImage<T>::SetIpl(img);\n    }\n\n    // Clone an image which reallocates the image if of a different dimension.\n    void CloneFrom(const WImage<T>& src) {\n        Allocate(src.Width(), src.Height(), src.Channels());\n        CopyFrom(src);\n    }\n\n    ~WImageBuffer() {\n        ReleaseImage();\n    }\n\n    // Release the image if it isn't null.\n    void ReleaseImage() {\n        if (WImage<T>::image_) {\n            IplImage* image = WImage<T>::image_;\n            cvReleaseImage(&image);\n            WImage<T>::SetIpl(0);\n        }\n    }\n\n    bool IsNull() const {return WImage<T>::image_ == NULL; }\n\nprivate:\n    // Disallow copy and assignment\n    WImageBuffer(const WImageBuffer&);\n    void operator=(const WImageBuffer&);\n};\n\n/** Like a WImageBuffer class but when the number of channels is known at compile time.\n*/\ntemplate<typename T, int C>\nclass WImageBufferC : public WImageC<T, C>\n{\npublic:\n    typedef typename WImage<T>::BaseType BaseType;\n    enum { kChannels = C };\n\n    // Default constructor which creates an object that can be allocated later\n    // using Allocate() or SetIpl().\n    WImageBufferC() : WImageC<T, C>(0) {}\n\n    WImageBufferC(int width, int height) : WImageC<T, C>(0) {\n        Allocate(width, height);\n    }\n\n    // Constructor which takes ownership of a given IplImage so releases\n    // the image on destruction.\n    explicit WImageBufferC(IplImage* img) : WImageC<T, C>(img) {}\n\n    // Allocate an image.  Does nothing if current size is the same as\n    // the new size.\n    void Allocate(int width, int height);\n\n    // Set the data to point to an image, releasing the old data\n    void SetIpl(IplImage* img) {\n        ReleaseImage();\n        WImageC<T, C>::SetIpl(img);\n    }\n\n    // Clone an image which reallocates the image if of a different dimension.\n    void CloneFrom(const WImageC<T, C>& src) {\n        Allocate(src.Width(), src.Height());\n        CopyFrom(src);\n    }\n\n    ~WImageBufferC() {\n        ReleaseImage();\n    }\n\n    // Release the image if it isn't null.\n    void ReleaseImage() {\n        if (WImage<T>::image_) {\n            IplImage* image = WImage<T>::image_;\n            cvReleaseImage(&image);\n            WImageC<T, C>::SetIpl(0);\n        }\n    }\n\n    bool IsNull() const {return WImage<T>::image_ == NULL; }\n\nprivate:\n    // Disallow copy and assignment\n    WImageBufferC(const WImageBufferC&);\n    void operator=(const WImageBufferC&);\n};\n\n/** View into an image class which allows treating a subimage as an image or treating external data\nas an image\n*/\ntemplate<typename T> class WImageView : public WImage<T>\n{\npublic:\n    typedef typename WImage<T>::BaseType BaseType;\n\n    // Construct a subimage.  No checks are done that the subimage lies\n    // completely inside the original image.\n    WImageView(WImage<T>* img, int c, int r, int width, int height);\n\n    // Refer to external data.\n    // If width_step is not given, rows are assumed to be densely packed.\n    WImageView(T* data, int width, int height, int channels, int width_step = -1);\n\n    // Refer to external data.  
This does NOT take ownership\n    // of the supplied IplImage.\n    WImageView(IplImage* img) : WImage<T>(img) {}\n\n    // Copy constructor\n    WImageView(const WImage<T>& img) : WImage<T>(0) {\n        header_ = *(img.Ipl());\n        WImage<T>::SetIpl(&header_);\n    }\n\n    WImageView& operator=(const WImage<T>& img) {\n        header_ = *(img.Ipl());\n        WImage<T>::SetIpl(&header_);\n        return *this;\n    }\n\nprotected:\n    IplImage header_;\n};\n\n\ntemplate<typename T, int C>\nclass WImageViewC : public WImageC<T, C>\n{\npublic:\n    typedef typename WImage<T>::BaseType BaseType;\n    enum { kChannels = C };\n\n    // Default constructor needed for vectors of views.\n    WImageViewC();\n\n    virtual ~WImageViewC() {}\n\n    // Construct a subimage.  No checks are done that the subimage lies\n    // completely inside the original image.\n    WImageViewC(WImageC<T, C>* img,\n        int c, int r, int width, int height);\n\n    // Refer to external data\n    WImageViewC(T* data, int width, int height, int width_step = -1);\n\n    // Refer to external data.  This does NOT take ownership\n    // of the supplied IplImage.\n    WImageViewC(IplImage* img) : WImageC<T, C>(img) {}\n\n    // Copy constructor which does a shallow copy to allow multiple views\n    // of same data.  gcc-4.1.1 gets confused if both versions of\n    // the constructor and assignment operator are not provided.\n    WImageViewC(const WImageC<T, C>& img) : WImageC<T, C>(0) {\n        header_ = *(img.Ipl());\n        WImageC<T, C>::SetIpl(&header_);\n    }\n    WImageViewC(const WImageViewC<T, C>& img) : WImageC<T, C>(0) {\n        header_ = *(img.Ipl());\n        WImageC<T, C>::SetIpl(&header_);\n    }\n\n    WImageViewC& operator=(const WImageC<T, C>& img) {\n        header_ = *(img.Ipl());\n        WImageC<T, C>::SetIpl(&header_);\n        return *this;\n    }\n    WImageViewC& operator=(const WImageViewC<T, C>& img) {\n        header_ = *(img.Ipl());\n        WImageC<T, C>::SetIpl(&header_);\n        return *this;\n    }\n\nprotected:\n    IplImage header_;\n};\n\n\n// Specializations for depth\ntemplate<>\ninline int WImage<uchar>::Depth() const {return IPL_DEPTH_8U; }\ntemplate<>\ninline int WImage<signed char>::Depth() const {return IPL_DEPTH_8S; }\ntemplate<>\ninline int WImage<short>::Depth() const {return IPL_DEPTH_16S; }\ntemplate<>\ninline int WImage<ushort>::Depth() const {return IPL_DEPTH_16U; }\ntemplate<>\ninline int WImage<int>::Depth() const {return IPL_DEPTH_32S; }\ntemplate<>\ninline int WImage<float>::Depth() const {return IPL_DEPTH_32F; }\ntemplate<>\ninline int WImage<double>::Depth() const {return IPL_DEPTH_64F; }\n\ntemplate<typename T> inline WImage<T>::~WImage() {}\ntemplate<typename T, int C> inline WImageC<T, C>::~WImageC() {}\n\ntemplate<typename T>\ninline void WImageBuffer<T>::Allocate(int width, int height, int nchannels)\n{\n    if (IsNull() || WImage<T>::Width() != width ||\n        WImage<T>::Height() != height || WImage<T>::Channels() != nchannels) {\n        ReleaseImage();\n        WImage<T>::image_ = cvCreateImage(cvSize(width, height),\n            WImage<T>::Depth(), nchannels);\n    }\n}\n\ntemplate<typename T, int C>\ninline void WImageBufferC<T, C>::Allocate(int width, int height)\n{\n    if (IsNull() || WImage<T>::Width() != width || WImage<T>::Height() != height) {\n        ReleaseImage();\n        WImageC<T, C>::SetIpl(cvCreateImage(cvSize(width, height),WImage<T>::Depth(), C));\n    }\n}\n\ntemplate<typename T>\nWImageView<T>::WImageView(WImage<T>* img, 
int c, int r, int width, int height)\n        : WImage<T>(0)\n{\n    header_ = *(img->Ipl());\n    header_.imageData = reinterpret_cast<char*>((*img)(c, r));\n    header_.width = width;\n    header_.height = height;\n    WImage<T>::SetIpl(&header_);\n}\n\ntemplate<typename T>\nWImageView<T>::WImageView(T* data, int width, int height, int nchannels, int width_step)\n          : WImage<T>(0)\n{\n    cvInitImageHeader(&header_, cvSize(width, height), WImage<T>::Depth(), nchannels);\n    header_.imageData = reinterpret_cast<char*>(data);\n    if (width_step > 0) {\n        header_.widthStep = width_step;\n    }\n    WImage<T>::SetIpl(&header_);\n}\n\ntemplate<typename T, int C>\nWImageViewC<T, C>::WImageViewC(WImageC<T, C>* img, int c, int r, int width, int height)\n        : WImageC<T, C>(0)\n{\n    header_ = *(img->Ipl());\n    header_.imageData = reinterpret_cast<char*>((*img)(c, r));\n    header_.width = width;\n    header_.height = height;\n    WImageC<T, C>::SetIpl(&header_);\n}\n\ntemplate<typename T, int C>\nWImageViewC<T, C>::WImageViewC() : WImageC<T, C>(0) {\n    cvInitImageHeader(&header_, cvSize(0, 0), WImage<T>::Depth(), C);\n    header_.imageData = reinterpret_cast<char*>(0);\n    WImageC<T, C>::SetIpl(&header_);\n}\n\ntemplate<typename T, int C>\nWImageViewC<T, C>::WImageViewC(T* data, int width, int height, int width_step)\n    : WImageC<T, C>(0)\n{\n    cvInitImageHeader(&header_, cvSize(width, height), WImage<T>::Depth(), C);\n    header_.imageData = reinterpret_cast<char*>(data);\n    if (width_step > 0) {\n        header_.widthStep = width_step;\n    }\n    WImageC<T, C>::SetIpl(&header_);\n}\n\n// Construct a view into a region of an image\ntemplate<typename T>\nWImageView<T> WImage<T>::View(int c, int r, int width, int height) {\n    return WImageView<T>(this, c, r, width, height);\n}\n\ntemplate<typename T, int C>\nWImageViewC<T, C> WImageC<T, C>::View(int c, int r, int width, int height) {\n    return WImageViewC<T, C>(this, c, r, width, height);\n}\n\n//! @} core\n\n}  // end of namespace\n\n#endif // __cplusplus\n\n#endif\n"
  },
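  {
    "path": "doc/examples/wimage_example.cpp",
    "content": "// Illustrative example (this file and its path are hypothetical, not part of\n// upstream OpenCV or this project) based on the usage notes in wimage.hpp\n// above: an owning WImageBuffer, a non-owning WImageView into it, and generic\n// row iteration through the WImage base interface.\n#include <cstdlib>\n#include \"opencv2/core/wimage.hpp\"\n\n// Works for any channel count because it only uses the generic accessors.\nstatic void FillRand(cv::WImage_b* im)\n{\n    for (int r = 0; r < im->Height(); ++r) {\n        unsigned char* row = im->Row(r);\n        for (int i = 0; i < im->Width() * im->Channels(); ++i, ++row)\n            *row = (unsigned char)(std::rand() & 255);\n    }\n}\n\nint main()\n{\n    cv::WImageBuffer3_b im(5, 7);                // owning 5x7 3-channel image\n    im.SetZero();                                // same as cvSetZero(im.Ipl())\n    cv::WImageView3_b sub = im.View(2, 2, 3, 3); // non-owning 3x3 sub-view\n    FillRand(&sub);                              // writes through to im's pixels\n    *im(2, 3) = 15;                              // pixel at column 2, row 3\n    return 0;\n}\n"
  },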
  {
    "path": "src/3rdparty/opencv/include/opencv2/core.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2015, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2015, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2015, Itseez Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_CORE_HPP__\n#define __OPENCV_CORE_HPP__\n\n#ifndef __cplusplus\n#  error core.hpp header must be compiled as C++\n#endif\n\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/version.hpp\"\n#include \"opencv2/core/base.hpp\"\n#include \"opencv2/core/cvstd.hpp\"\n#include \"opencv2/core/traits.hpp\"\n#include \"opencv2/core/matx.hpp\"\n#include \"opencv2/core/types.hpp\"\n#include \"opencv2/core/mat.hpp\"\n#include \"opencv2/core/persistence.hpp\"\n\n/**\n@defgroup core Core functionality\n@{\n    @defgroup core_basic Basic structures\n    @defgroup core_c C structures and operations\n    @{\n        @defgroup core_c_glue Connections with C++\n    @}\n    @defgroup core_array Operations on arrays\n    @defgroup core_xml XML/YAML Persistence\n    @defgroup core_cluster Clustering\n    @defgroup core_utils Utility and system functions and macros\n    @{\n        @defgroup core_utils_sse SSE utilities\n        @defgroup core_utils_neon NEON utilities\n    @}\n    @defgroup core_opengl OpenGL interoperability\n    @defgroup core_ipp Intel IPP Asynchronous C/C++ Converters\n    @defgroup core_optim Optimization Algorithms\n    @defgroup core_directx DirectX 
interoperability\n    @defgroup core_eigen Eigen support\n    @defgroup core_opencl OpenCL support\n    @defgroup core_va_intel Intel VA-API/OpenCL (CL-VA) interoperability\n    @defgroup core_hal Hardware Acceleration Layer\n    @{\n        @defgroup core_hal_functions Functions\n        @defgroup core_hal_interface Interface\n        @defgroup core_hal_intrin Universal intrinsics\n        @{\n            @defgroup core_hal_intrin_impl Private implementation helpers\n        @}\n    @}\n@}\n */\n\nnamespace cv {\n\n//! @addtogroup core_utils\n//! @{\n\n/*! @brief Class passed to an error.\n\nThis class encapsulates all or almost all necessary\ninformation about the error that happened in the program. The exception is\nusually constructed and thrown implicitly via CV_Error and CV_Error_ macros.\n@see error\n */\nclass CV_EXPORTS Exception : public std::exception\n{\npublic:\n    /*!\n     Default constructor\n     */\n    Exception();\n    /*!\n     Full constructor. Normally the constructor is not called explicitly.\n     Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used.\n    */\n    Exception(int _code, const String& _err, const String& _func, const String& _file, int _line);\n    virtual ~Exception() throw();\n\n    /*!\n     \\return the error description and the context as a text string.\n    */\n    virtual const char *what() const throw();\n    void formatMessage();\n\n    String msg; ///< the formatted error message\n\n    int code; ///< error code @see CVStatus\n    String err; ///< error description\n    String func; ///< function name. Available only when the compiler supports getting it\n    String file; ///< source file name where the error has occurred\n    int line; ///< line number in the source file where the error has occurred\n};\n\n/*! @brief Signals an error and raises the exception.\n\nBy default the function prints information about the error to stderr,\nthen it either stops if cv::setBreakOnError() had been called before or raises the exception.\nIt is possible to alter error processing by using cv::redirectError().\n@param exc the exception raised.\n@deprecated drop this version\n */\nCV_EXPORTS void error( const Exception& exc );\n\nenum SortFlags { SORT_EVERY_ROW    = 0, //!< each matrix row is sorted independently\n                 SORT_EVERY_COLUMN = 1, //!< each matrix column is sorted\n                                        //!< independently; this flag and the previous one are\n                                        //!< mutually exclusive.\n                 SORT_ASCENDING    = 0, //!< each matrix row is sorted in the ascending\n                                        //!< order.\n                 SORT_DESCENDING   = 16 //!< each matrix row is sorted in the\n                                        //!< descending order; this flag and the previous one are also\n                                        //!< mutually exclusive.\n               };\n\n//! @} core_utils\n\n//! @addtogroup core\n//! @{\n\n//! Covariance flags\nenum CovarFlags {\n    /** The output covariance matrix is calculated as:\n       \\f[\\texttt{scale}   \\cdot  [  \\texttt{vects}  [0]-  \\texttt{mean}  , \\texttt{vects}  [1]-  \\texttt{mean}  ,...]^T  \\cdot  [ \\texttt{vects}  [0]- \\texttt{mean}  , \\texttt{vects}  [1]- \\texttt{mean}  ,...],\\f]\n       The covariance matrix will be nsamples x nsamples. Such an unusual covariance matrix is used\n       for fast PCA of a set of very large vectors (see, for example, the EigenFaces technique for\n       face recognition). 
Eigenvalues of this \"scrambled\" matrix match the eigenvalues of the true\n       covariance matrix. The \"true\" eigenvectors can be easily calculated from the eigenvectors of\n       the \"scrambled\" covariance matrix. */\n    COVAR_SCRAMBLED = 0,\n    /**The output covariance matrix is calculated as:\n        \\f[\\texttt{scale}   \\cdot  [  \\texttt{vects}  [0]-  \\texttt{mean}  , \\texttt{vects}  [1]-  \\texttt{mean}  ,...]  \\cdot  [ \\texttt{vects}  [0]- \\texttt{mean}  , \\texttt{vects}  [1]- \\texttt{mean}  ,...]^T,\\f]\n        covar will be a square matrix of the same size as the total number of elements in each input\n        vector. One and only one of COVAR_SCRAMBLED and COVAR_NORMAL must be specified.*/\n    COVAR_NORMAL    = 1,\n    /** If the flag is specified, the function does not calculate mean from\n        the input vectors but, instead, uses the passed mean vector. This is useful if mean has been\n        pre-calculated or known in advance, or if the covariance matrix is calculated by parts. In\n        this case, mean is not a mean vector of the input sub-set of vectors but rather the mean\n        vector of the whole set.*/\n    COVAR_USE_AVG   = 2,\n    /** If the flag is specified, the covariance matrix is scaled. In the\n        \"normal\" mode, scale is 1./nsamples . In the \"scrambled\" mode, scale is the reciprocal of the\n        total number of elements in each input vector. By default (if the flag is not specified), the\n        covariance matrix is not scaled ( scale=1 ).*/\n    COVAR_SCALE     = 4,\n    /** If the flag is\n        specified, all the input vectors are stored as rows of the samples matrix. mean should be a\n        single-row vector in this case.*/\n    COVAR_ROWS      = 8,\n    /** If the flag is\n        specified, all the input vectors are stored as columns of the samples matrix. mean should be a\n        single-column vector in this case.*/\n    COVAR_COLS      = 16\n};\n\n//! k-Means flags\nenum KmeansFlags {\n    /** Select random initial centers in each attempt.*/\n    KMEANS_RANDOM_CENTERS     = 0,\n    /** Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].*/\n    KMEANS_PP_CENTERS         = 2,\n    /** During the first (and possibly the only) attempt, use the\n        user-supplied labels instead of computing them from the initial centers. For the second and\n        further attempts, use the random or semi-random centers. Use one of KMEANS_\\*_CENTERS flag\n        to specify the exact method.*/\n    KMEANS_USE_INITIAL_LABELS = 1\n};\n\n//! type of line\nenum LineTypes {\n    FILLED  = -1,\n    LINE_4  = 4, //!< 4-connected line\n    LINE_8  = 8, //!< 8-connected line\n    LINE_AA = 16 //!< antialiased line\n};\n\n//! Only a subset of Hershey fonts\n//! 
<http://sources.isc.org/utils/misc/hershey-font.txt> are supported\nenum HersheyFonts {\n    FONT_HERSHEY_SIMPLEX        = 0, //!< normal size sans-serif font\n    FONT_HERSHEY_PLAIN          = 1, //!< small size sans-serif font\n    FONT_HERSHEY_DUPLEX         = 2, //!< normal size sans-serif font (more complex than FONT_HERSHEY_SIMPLEX)\n    FONT_HERSHEY_COMPLEX        = 3, //!< normal size serif font\n    FONT_HERSHEY_TRIPLEX        = 4, //!< normal size serif font (more complex than FONT_HERSHEY_COMPLEX)\n    FONT_HERSHEY_COMPLEX_SMALL  = 5, //!< smaller version of FONT_HERSHEY_COMPLEX\n    FONT_HERSHEY_SCRIPT_SIMPLEX = 6, //!< hand-writing style font\n    FONT_HERSHEY_SCRIPT_COMPLEX = 7, //!< more complex variant of FONT_HERSHEY_SCRIPT_SIMPLEX\n    FONT_ITALIC                 = 16 //!< flag for italic font\n};\n\nenum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/columns of the matrix.\n                   REDUCE_AVG = 1, //!< the output is the mean vector of all rows/columns of the matrix.\n                   REDUCE_MAX = 2, //!< the output is the maximum (column/row-wise) of all rows/columns of the matrix.\n                   REDUCE_MIN = 3  //!< the output is the minimum (column/row-wise) of all rows/columns of the matrix.\n                 };\n\n\n/** @brief Swaps two matrices\n*/\nCV_EXPORTS void swap(Mat& a, Mat& b);\n/** @overload */\nCV_EXPORTS void swap( UMat& a, UMat& b );\n\n//! @} core\n\n//! @addtogroup core_array\n//! @{\n\n/** @brief Computes the source location of an extrapolated pixel.\n\nThe function computes and returns the coordinate of a donor pixel corresponding to the specified\nextrapolated pixel when using the specified extrapolation border mode. For example, if you use\ncv::BORDER_WRAP mode in the horizontal direction, cv::BORDER_REFLECT_101 in the vertical direction and\nwant to compute value of the \"virtual\" pixel Point(-5, 100) in a floating-point image img , it\nlooks like:\n@code{.cpp}\n    float val = img.at<float>(borderInterpolate(100, img.rows, cv::BORDER_REFLECT_101),\n                              borderInterpolate(-5, img.cols, cv::BORDER_WRAP));\n@endcode\nNormally, the function is not called directly. It is used inside filtering functions and also in\ncopyMakeBorder.\n@param p 0-based coordinate of the extrapolated pixel along one of the axes, likely \\<0 or \\>= len\n@param len Length of the array along the corresponding axis.\n@param borderType Border type, one of the cv::BorderTypes, except for cv::BORDER_TRANSPARENT and\ncv::BORDER_ISOLATED . When borderType==cv::BORDER_CONSTANT , the function always returns -1, regardless\nof p and len.\n\n@sa copyMakeBorder\n*/\nCV_EXPORTS_W int borderInterpolate(int p, int len, int borderType);\n\n/** @brief Forms a border around an image.\n\nThe function copies the source image into the middle of the destination image. The areas to the\nleft, to the right, above and below the copied source image will be filled with extrapolated\npixels. This is not what filtering functions based on it do (they extrapolate pixels on-fly), but\nwhat other more complex functions, including your own, may do to simplify image boundary handling.\n\nThe function supports the mode when src is already in the middle of dst . 
In this case, the\nfunction does not copy src itself but simply constructs the border, for example:\n\n@code{.cpp}\n    // let border be the same in all directions\n    int border=2;\n    // constructs a larger image to fit both the image and the border\n    Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());\n    // select the middle part of it w/o copying data\n    Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));\n    // convert image from RGB to grayscale\n    cvtColor(rgb, gray, COLOR_RGB2GRAY);\n    // form a border in-place\n    copyMakeBorder(gray, gray_buf, border, border,\n                   border, border, BORDER_REPLICATE);\n    // now do some custom filtering ...\n    ...\n@endcode\n@note When the source image is a part (ROI) of a bigger image, the function will try to use the\npixels outside of the ROI to form a border. To disable this feature and always do extrapolation, as\nif src was not a ROI, use borderType | BORDER_ISOLATED.\n\n@param src Source image.\n@param dst Destination image of the same type as src and the size Size(src.cols+left+right,\nsrc.rows+top+bottom) .\n@param top\n@param bottom\n@param left\n@param right Parameter specifying how many pixels in each direction from the source image rectangle\nto extrapolate. For example, top=1, bottom=1, left=1, right=1 mean that 1 pixel-wide border needs\nto be built.\n@param borderType Border type. See borderInterpolate for details.\n@param value Border value if borderType==BORDER_CONSTANT .\n\n@sa  borderInterpolate\n*/\nCV_EXPORTS_W void copyMakeBorder(InputArray src, OutputArray dst,\n                                 int top, int bottom, int left, int right,\n                                 int borderType, const Scalar& value = Scalar() );\n\n/** @brief Calculates the per-element sum of two arrays or an array and a scalar.\n\nThe function add calculates:\n- Sum of two arrays when both input arrays have the same size and the same number of channels:\n\\f[\\texttt{dst}(I) =  \\texttt{saturate} ( \\texttt{src1}(I) +  \\texttt{src2}(I)) \\quad \\texttt{if mask}(I) \\ne0\\f]\n- Sum of an array and a scalar when src2 is constructed from Scalar or has the same number of\nelements as `src1.channels()`:\n\\f[\\texttt{dst}(I) =  \\texttt{saturate} ( \\texttt{src1}(I) +  \\texttt{src2} ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n- Sum of a scalar and an array when src1 is constructed from Scalar or has the same number of\nelements as `src2.channels()`:\n\\f[\\texttt{dst}(I) =  \\texttt{saturate} ( \\texttt{src1} +  \\texttt{src2}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\nwhere `I` is a multi-dimensional index of array elements. In case of multi-channel arrays, each\nchannel is processed independently.\n\nThe first function in the list above can be replaced with matrix expressions:\n@code{.cpp}\n    dst = src1 + src2;\n    dst += src1; // equivalent to add(dst, src1, dst);\n@endcode\nThe input arrays and the output array can all have the same or different depths. For example, you\ncan add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit\nfloating-point array. Depth of the output array is determined by the dtype parameter. In the second\nand third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can\nbe set to the default -1. In this case, the output array will have the same depth as the input\narray, be it src1, src2 or both.\n@note Saturation is not applied when the output array has the depth CV_32S. 
You may even get\nresult of an incorrect sign in the case of overflow.\n@param src1 first input array or a scalar.\n@param src2 second input array or a scalar.\n@param dst output array that has the same size and number of channels as the input array(s); the\ndepth is defined by dtype or src1/src2.\n@param mask optional operation mask - 8-bit single channel array, that specifies elements of the\noutput array to be changed.\n@param dtype optional depth of the output array (see the discussion below).\n@sa subtract, addWeighted, scaleAdd, Mat::convertTo\n*/\nCV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst,\n                      InputArray mask = noArray(), int dtype = -1);\n\n/** @brief Calculates the per-element difference between two arrays or array and a scalar.\n\nThe function subtract calculates:\n- Difference between two arrays, when both input arrays have the same size and the same number of\nchannels:\n    \\f[\\texttt{dst}(I) =  \\texttt{saturate} ( \\texttt{src1}(I) -  \\texttt{src2}(I)) \\quad \\texttt{if mask}(I) \\ne0\\f]\n- Difference between an array and a scalar, when src2 is constructed from Scalar or has the same\nnumber of elements as `src1.channels()`:\n    \\f[\\texttt{dst}(I) =  \\texttt{saturate} ( \\texttt{src1}(I) -  \\texttt{src2} ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n- Difference between a scalar and an array, when src1 is constructed from Scalar or has the same\nnumber of elements as `src2.channels()`:\n    \\f[\\texttt{dst}(I) =  \\texttt{saturate} ( \\texttt{src1} -  \\texttt{src2}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n- The reverse difference between a scalar and an array in the case of `SubRS`:\n    \\f[\\texttt{dst}(I) =  \\texttt{saturate} ( \\texttt{src2} -  \\texttt{src1}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\nwhere I is a multi-dimensional index of array elements. In case of multi-channel arrays, each\nchannel is processed independently.\n\nThe first function in the list above can be replaced with matrix expressions:\n@code{.cpp}\n    dst = src1 - src2;\n    dst -= src1; // equivalent to subtract(dst, src1, dst);\n@endcode\nThe input arrays and the output array can all have the same or different depths. For example, you\ncan subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. Depth of\nthe output array is determined by the dtype parameter. In the second and third cases above, as well as\nin the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this\ncase the output array will have the same depth as the input array, be it src1, src2 or both.\n@note Saturation is not applied when the output array has the depth CV_32S. 
You may even get\nresult of an incorrect sign in the case of overflow.\n@param src1 first input array or a scalar.\n@param src2 second input array or a scalar.\n@param dst output array of the same size and the same number of channels as the input array.\n@param mask optional operation mask; this is an 8-bit single channel array that specifies elements\nof the output array to be changed.\n@param dtype optional depth of the output array\n@sa  add, addWeighted, scaleAdd, Mat::convertTo\n  */\nCV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst,\n                           InputArray mask = noArray(), int dtype = -1);\n\n\n/** @brief Calculates the per-element scaled product of two arrays.\n\nThe function multiply calculates the per-element product of two arrays:\n\n\\f[\\texttt{dst} (I)= \\texttt{saturate} ( \\texttt{scale} \\cdot \\texttt{src1} (I)  \\cdot \\texttt{src2} (I))\\f]\n\nThere is also a @ref MatrixExpressions -friendly variant of the first function. See Mat::mul .\n\nFor a not-per-element matrix product, see gemm .\n\n@note Saturation is not applied when the output array has the depth\nCV_32S. You may even get result of an incorrect sign in the case of\noverflow.\n@param src1 first input array.\n@param src2 second input array of the same size and the same type as src1.\n@param dst output array of the same size and type as src1.\n@param scale optional scale factor.\n@param dtype optional depth of the output array\n@sa add, subtract, divide, scaleAdd, addWeighted, accumulate, accumulateProduct, accumulateSquare,\nMat::convertTo\n*/\nCV_EXPORTS_W void multiply(InputArray src1, InputArray src2,\n                           OutputArray dst, double scale = 1, int dtype = -1);\n\n/** @brief Performs per-element division of two arrays or a scalar by an array.\n\nThe functions divide divide one array by another:\n\\f[\\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\\f]\nor a scalar by an array when there is no src1 :\n\\f[\\texttt{dst(I) = saturate(scale/src2(I))}\\f]\n\nWhen src2(I) is zero, dst(I) will also be zero. Different channels of\nmulti-channel arrays are processed independently.\n\n@note Saturation is not applied when the output array has the depth CV_32S. You may even get\nresult of an incorrect sign in the case of overflow.\n@param src1 first input array.\n@param src2 second input array of the same size and type as src1.\n@param scale scalar factor.\n@param dst output array of the same size and type as src2.\n@param dtype optional depth of the output array; if -1, dst will have depth src2.depth(), but in\ncase of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().\n@sa  multiply, add, subtract\n*/\nCV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst,\n                         double scale = 1, int dtype = -1);\n\n/** @overload */\nCV_EXPORTS_W void divide(double scale, InputArray src2,\n                         OutputArray dst, int dtype = -1);\n\n/** @brief Calculates the sum of a scaled array and another array.\n\nThe function scaleAdd is one of the classical primitive linear algebra operations, known as DAXPY\nor SAXPY in [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). 
@note Saturation is not applied when the output array has the depth CV_32S. You may even get\na result of an incorrect sign in the case of overflow.\n@param src1 first input array.\n@param src2 second input array of the same size and type as src1.\n@param scale scalar factor.\n@param dst output array of the same size and type as src2.\n@param dtype optional depth of the output array; if -1, dst will have depth src2.depth(), but in\ncase of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().\n@sa  multiply, add, subtract\n*/\nCV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst,\n                         double scale = 1, int dtype = -1);\n\n/** @overload */\nCV_EXPORTS_W void divide(double scale, InputArray src2,\n                         OutputArray dst, int dtype = -1);\n\n/** @brief Calculates the sum of a scaled array and another array.\n\nThe function scaleAdd is one of the classical primitive linear algebra operations, known as DAXPY\nor SAXPY in [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It calculates\nthe sum of a scaled array and another array:\n\\f[\\texttt{dst} (I)= \\texttt{scale} \\cdot \\texttt{src1} (I) +  \\texttt{src2} (I)\\f]\nThe function can also be emulated with a matrix expression, for example:\n@code{.cpp}\n    Mat A(3, 3, CV_64F);\n    ...\n    A.row(0) = A.row(1)*2 + A.row(2);\n@endcode\n@param src1 first input array.\n@param alpha scale factor for the first array.\n@param src2 second input array of the same size and type as src1.\n@param dst output array of the same size and type as src1.\n@sa add, addWeighted, subtract, Mat::dot, Mat::convertTo\n*/\nCV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst);\n\n/** @brief Calculates the weighted sum of two arrays.\n\nThe function addWeighted calculates the weighted sum of two arrays as follows:\n\\f[\\texttt{dst} (I)= \\texttt{saturate} ( \\texttt{src1} (I)* \\texttt{alpha} +  \\texttt{src2} (I)* \\texttt{beta} +  \\texttt{gamma} )\\f]\nwhere I is a multi-dimensional index of array elements. In case of multi-channel arrays, each\nchannel is processed independently.\nThe function can be replaced with a matrix expression:\n@code{.cpp}\n    dst = src1*alpha + src2*beta + gamma;\n@endcode\n@note Saturation is not applied when the output array has the depth CV_32S. You may even get\na result of an incorrect sign in the case of overflow.\n@param src1 first input array.\n@param alpha weight of the first array elements.\n@param src2 second input array of the same size and channel number as src1.\n@param beta weight of the second array elements.\n@param gamma scalar added to each sum.\n@param dst output array that has the same size and number of channels as the input arrays.\n@param dtype optional depth of the output array; when both input arrays have the same depth, dtype\ncan be set to -1, which will be equivalent to src1.depth().\n@sa  add, subtract, scaleAdd, Mat::convertTo\n*/\nCV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2,\n                              double beta, double gamma, OutputArray dst, int dtype = -1);\n\n/** @brief Scales, calculates absolute values, and converts the result to 8-bit.\n\nOn each element of the input array, the function convertScaleAbs\nperforms three operations sequentially: scaling, taking an absolute\nvalue, and conversion to an unsigned 8-bit type:\n\\f[\\texttt{dst} (I)= \\texttt{saturate\\_cast<uchar>} (| \\texttt{src} (I)* \\texttt{alpha} +  \\texttt{beta} |)\\f]\nIn case of multi-channel arrays, the function processes each channel\nindependently. When the output is not 8-bit, the operation can be\nemulated by calling the Mat::convertTo method (or by using matrix\nexpressions) and then by calculating an absolute value of the result.\nFor example:\n@code{.cpp}\n    Mat_<float> A(30,30);\n    randu(A, Scalar(-100), Scalar(100));\n    Mat_<float> B = A*5 + 3;\n    B = abs(B);\n    // Mat_<float> B = abs(A*5+3) will also do the job,\n    // but it will allocate a temporary matrix\n@endcode\n@param src input array.\n@param dst output array.\n@param alpha optional scale factor.\n@param beta optional delta added to the scaled values.\n@sa  Mat::convertTo, cv::abs(const Mat&)\n*/\nCV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst,\n                                  double alpha = 1, double beta = 0);\n\n/** @brief Performs a look-up table transform of an array.\n\nThe function LUT fills the output array with values from the look-up table. Indices of the entries\nare taken from the input array. That is, the function processes each element of src as follows:\n\\f[\\texttt{dst} (I)  \\leftarrow \\texttt{lut(src(I) + d)}\\f]\nwhere\n\\f[d =  \\fork{0}{if \\(\\texttt{src}\\) has depth \\(\\texttt{CV_8U}\\)}{128}{if \\(\\texttt{src}\\) has depth \\(\\texttt{CV_8S}\\)}\\f]\n
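For illustration, a minimal sketch that inverts an 8-bit image through a 256-entry table (the matrices are placeholders):\n@code{.cpp}\n    cv::Mat img(4, 4, CV_8UC1, cv::Scalar(10));\n    cv::Mat table(1, 256, CV_8UC1);\n    for (int i = 0; i < 256; i++)\n        table.at<uchar>(i) = (uchar)(255 - i);\n    cv::Mat inverted;\n    cv::LUT(img, table, inverted);  // every pixel becomes 255 - src(I)\n@endcode\n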
@param src input array of 8-bit elements.\n@param lut look-up table of 256 elements; in case of multi-channel input array, the table should\neither have a single channel (in this case the same table is used for all channels) or the same\nnumber of channels as in the input array.\n@param dst output array of the same size and number of channels as src, and the same depth as lut.\n@sa  convertScaleAbs, Mat::convertTo\n*/\nCV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst);\n\n/** @brief Calculates the sum of array elements.\n\nThe functions sum calculate and return the sum of array elements,\nindependently for each channel.\n@param src input array that must have from 1 to 4 channels.\n@sa  countNonZero, mean, meanStdDev, norm, minMaxLoc, reduce\n*/\nCV_EXPORTS_AS(sumElems) Scalar sum(InputArray src);\n\n/** @brief Counts non-zero array elements.\n\nThe function returns the number of non-zero elements in src :\n\\f[\\sum _{I: \\; \\texttt{src} (I) \\ne0 } 1\\f]\n@param src single-channel array.\n@sa  mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix\n*/\nCV_EXPORTS_W int countNonZero( InputArray src );\n\n/** @brief Returns the list of locations of non-zero pixels\n\nGiven a binary matrix (likely returned from an operation such\nas threshold(), compare(), >, ==, etc.), this function returns all of\nthe non-zero indices as a cv::Mat or std::vector<cv::Point> (x,y).\nFor example:\n@code{.cpp}\n    cv::Mat binaryImage; // input, binary image\n    cv::Mat locations;   // output, locations of non-zero pixels\n    cv::findNonZero(binaryImage, locations);\n\n    // access pixel coordinates\n    Point pnt = locations.at<Point>(i);\n@endcode\nor\n@code{.cpp}\n    cv::Mat binaryImage; // input, binary image\n    vector<Point> locations;   // output, locations of non-zero pixels\n    cv::findNonZero(binaryImage, locations);\n\n    // access pixel coordinates\n    Point pnt = locations[i];\n@endcode\n@param src single-channel array (type CV_8UC1).\n@param idx the output array, type of cv::Mat or std::vector<Point>, corresponding to non-zero indices in the input\n*/\nCV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );\n\n/** @brief Calculates an average (mean) of array elements.\n\nThe function mean calculates the mean value M of array elements,\nindependently for each channel, and returns it:\n\\f[\\begin{array}{l} N =  \\sum _{I: \\; \\texttt{mask} (I) \\ne 0} 1 \\\\ M_c =  \\left ( \\sum _{I: \\; \\texttt{mask} (I) \\ne 0}{ \\texttt{mtx} (I)_c} \\right )/N \\end{array}\\f]\nWhen all the mask elements are 0's, the functions return Scalar::all(0).\n
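For instance, a short sketch of a masked mean (the region and values are placeholders):\n@code{.cpp}\n    cv::Mat img(4, 4, CV_8UC1, cv::Scalar(50));\n    cv::Mat mask = cv::Mat::zeros(4, 4, CV_8UC1);\n    mask(cv::Rect(0, 0, 2, 2)).setTo(cv::Scalar(255)); // average only the top-left corner\n    cv::Scalar m = cv::mean(img, mask);                // m[0] == 50\n@endcode\n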
@param src input array that should have from 1 to 4 channels so that the result can be stored in\nScalar_ .\n@param mask optional operation mask.\n@sa  countNonZero, meanStdDev, norm, minMaxLoc\n*/\nCV_EXPORTS_W Scalar mean(InputArray src, InputArray mask = noArray());\n\n/** Calculates a mean and standard deviation of array elements.\n\nThe function meanStdDev calculates the mean and the standard deviation M\nof array elements independently for each channel and returns it via the\noutput parameters:\n\\f[\\begin{array}{l} N =  \\sum _{I, \\texttt{mask} (I)  \\ne 0} 1 \\\\ \\texttt{mean} _c =  \\frac{\\sum_{ I: \\; \\texttt{mask}(I) \\ne 0} \\texttt{src} (I)_c}{N} \\\\ \\texttt{stddev} _c =  \\sqrt{\\frac{\\sum_{ I: \\; \\texttt{mask}(I) \\ne 0} \\left ( \\texttt{src} (I)_c -  \\texttt{mean} _c \\right )^2}{N}} \\end{array}\\f]\nWhen all the mask elements are 0's, the functions return\nmean=stddev=Scalar::all(0).\n@note The calculated standard deviation is only the diagonal of the\ncomplete normalized covariance matrix. If the full matrix is needed, you\ncan reshape the multi-channel array M x N to the single-channel array\nM\\*N x mtx.channels() (only possible when the matrix is continuous) and\nthen pass the matrix to calcCovarMatrix .\n@param src input array that should have from 1 to 4 channels so that the results can be stored in\nScalar_ 's.\n@param mean output parameter: calculated mean value.\n@param stddev output parameter: calculated standard deviation.\n@param mask optional operation mask.\n@sa  countNonZero, mean, norm, minMaxLoc, calcCovarMatrix\n*/\nCV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev,\n                             InputArray mask=noArray());\n\n/** @brief Calculates an absolute array norm, an absolute difference norm, or a\nrelative difference norm.\n\nThe functions norm calculate an absolute norm of src1 (when there is no\nsrc2 ):\n\n\\f[norm =  \\forkthree{\\|\\texttt{src1}\\|_{L_{\\infty}} =  \\max _I | \\texttt{src1} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_INF}\\) }\n{ \\| \\texttt{src1} \\| _{L_1} =  \\sum _I | \\texttt{src1} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_L1}\\) }\n{ \\| \\texttt{src1} \\| _{L_2} =  \\sqrt{\\sum_I \\texttt{src1}(I)^2} }{if  \\(\\texttt{normType} = \\texttt{NORM_L2}\\) }\\f]\n\nor an absolute or relative difference norm if src2 is there:\n\n\\f[norm =  \\forkthree{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_{\\infty}} =  \\max _I | \\texttt{src1} (I) -  \\texttt{src2} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_INF}\\) }\n{ \\| \\texttt{src1} - \\texttt{src2} \\| _{L_1} =  \\sum _I | \\texttt{src1} (I) -  \\texttt{src2} (I)|}{if  \\(\\texttt{normType} = \\texttt{NORM_L1}\\) }\n{ \\| \\texttt{src1} - \\texttt{src2} \\| _{L_2} =  \\sqrt{\\sum_I (\\texttt{src1}(I) - \\texttt{src2}(I))^2} }{if  \\(\\texttt{normType} = \\texttt{NORM_L2}\\) }\\f]\n\nor\n\n\\f[norm =  \\forkthree{\\frac{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_{\\infty}}    }{\\|\\texttt{src2}\\|_{L_{\\infty}} }}{if  \\(\\texttt{normType} = \\texttt{NORM_RELATIVE_INF}\\) }\n{ \\frac{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_1} }{\\|\\texttt{src2}\\|_{L_1}} }{if  \\(\\texttt{normType} = \\texttt{NORM_RELATIVE_L1}\\) }\n{ \\frac{\\|\\texttt{src1}-\\texttt{src2}\\|_{L_2} }{\\|\\texttt{src2}\\|_{L_2}} }{if  \\(\\texttt{normType} = \\texttt{NORM_RELATIVE_L2}\\) }\\f]\n\nThe functions norm return the calculated norm.\n\nWhen the mask parameter is specified and it is not empty, the norm is\ncalculated only over the region specified by the mask.\n\nMulti-channel input arrays are treated as single-channel arrays, that is,\nthe results for all channels are combined.\n\n
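For example, a compact sketch of the absolute forms (the arrays are placeholders):\n@code{.cpp}\n    cv::Mat a(8, 8, CV_32FC1, cv::Scalar(1));\n    cv::Mat b(8, 8, CV_32FC1, cv::Scalar(3));\n    double maxAbs = cv::norm(a, cv::NORM_INF);   // largest absolute element of a\n    double dist   = cv::norm(a, b, cv::NORM_L2); // Euclidean distance between a and b\n@endcode\n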
@param src1 first input array.\n@param normType type of the norm (see cv::NormTypes).\n@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.\n*/\nCV_EXPORTS_W double norm(InputArray src1, int normType = NORM_L2, InputArray mask = noArray());\n\n/** @overload\n@param src1 first input array.\n@param src2 second input array of the same size and the same type as src1.\n@param normType type of the norm (cv::NormTypes).\n@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.\n*/\nCV_EXPORTS_W double norm(InputArray src1, InputArray src2,\n                         int normType = NORM_L2, InputArray mask = noArray());\n/** @overload\n@param src first input array.\n@param normType type of the norm (see cv::NormTypes).\n*/\nCV_EXPORTS double norm( const SparseMat& src, int normType );\n\n/** @brief computes PSNR image/video quality metric\n\nsee http://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio for details\n@todo document\n  */\nCV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);\n\n/** @brief naive nearest neighbor finder\n\nsee http://en.wikipedia.org/wiki/Nearest_neighbor_search\n@todo document\n  */\nCV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2,\n                                OutputArray dist, int dtype, OutputArray nidx,\n                                int normType = NORM_L2, int K = 0,\n                                InputArray mask = noArray(), int update = 0,\n                                bool crosscheck = false);\n\n/** @brief Normalizes the norm or value range of an array.\n\nThe functions normalize scale and shift the input array elements so that\n\\f[\\| \\texttt{dst} \\| _{L_p}= \\texttt{alpha}\\f]\n(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that\n\\f[\\min _I  \\texttt{dst} (I)= \\texttt{alpha} , \\, \\, \\max _I  \\texttt{dst} (I)= \\texttt{beta}\\f]\n\nwhen normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be\nnormalized. This means that the norm or the min/max values are calculated over the sub-array, and then this\nsub-array is modified to be normalized. If you want to only use the mask to calculate the norm or\nmin-max but modify the whole array, you can use norm and Mat::convertTo.\n\nIn case of sparse matrices, only the non-zero values are analyzed and transformed. 
Because of this,\nthe range transformation for sparse matrices is not allowed since it can shift the zero level.\n\nPossible usage with some positive example data:\n@code{.cpp}\n    vector<double> positiveData = { 2.0, 8.0, 10.0 };\n    vector<double> normalizedData_l1, normalizedData_l2, normalizedData_inf, normalizedData_minmax;\n\n    // Norm to probability (total count)\n    // sum(numbers) = 20.0\n    // 2.0      0.1     (2.0/20.0)\n    // 8.0      0.4     (8.0/20.0)\n    // 10.0     0.5     (10.0/20.0)\n    normalize(positiveData, normalizedData_l1, 1.0, 0.0, NORM_L1);\n\n    // Norm to unit vector: ||positiveData|| = 1.0\n    // 2.0      0.15\n    // 8.0      0.62\n    // 10.0     0.77\n    normalize(positiveData, normalizedData_l2, 1.0, 0.0, NORM_L2);\n\n    // Norm to max element\n    // 2.0      0.2     (2.0/10.0)\n    // 8.0      0.8     (8.0/10.0)\n    // 10.0     1.0     (10.0/10.0)\n    normalize(positiveData, normalizedData_inf, 1.0, 0.0, NORM_INF);\n\n    // Norm to range [0.0;1.0]\n    // 2.0      0.0     (shift to left border)\n    // 8.0      0.75    (6.0/8.0)\n    // 10.0     1.0     (shift to right border)\n    normalize(positiveData, normalizedData_minmax, 1.0, 0.0, NORM_MINMAX);\n@endcode\n\n@param src input array.\n@param dst output array of the same size as src .\n@param alpha norm value to normalize to or the lower range boundary in case of the range\nnormalization.\n@param beta upper range boundary in case of the range normalization; it is not used for the norm\nnormalization.\n@param norm_type normalization type (see cv::NormTypes).\n@param dtype when negative, the output array has the same type as src; otherwise, it has the same\nnumber of channels as src and the depth =CV_MAT_DEPTH(dtype).\n@param mask optional operation mask.\n@sa norm, Mat::convertTo, SparseMat::convertTo\n*/\nCV_EXPORTS_W void normalize( InputArray src, InputOutputArray dst, double alpha = 1, double beta = 0,\n                             int norm_type = NORM_L2, int dtype = -1, InputArray mask = noArray());\n\n/** @overload\n@param src input array.\n@param dst output array of the same size as src .\n@param alpha norm value to normalize to or the lower range boundary in case of the range\nnormalization.\n@param normType normalization type (see cv::NormTypes).\n*/\nCV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType );\n\n/** @brief Finds the global minimum and maximum in an array.\n\nThe functions minMaxLoc find the minimum and maximum element values and their positions. The\nextrema are searched across the whole array or, if mask is not an empty array, in the specified\narray region.\n\nThe functions do not work with multi-channel arrays. If you need to find minimum or maximum\nelements across all the channels, use Mat::reshape first to reinterpret the array as\nsingle-channel. Or you may extract the particular channel using either extractImageCOI , or\nmixChannels , or split .\n
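For example, a brief sketch (the matrix contents are placeholders):\n@code{.cpp}\n    cv::Mat img(4, 4, CV_8UC1, cv::Scalar(7));\n    img.at<uchar>(2, 3) = 200;\n    double minVal, maxVal;\n    cv::Point minLoc, maxLoc;\n    cv::minMaxLoc(img, &minVal, &maxVal, &minLoc, &maxLoc);\n    // maxVal == 200 and maxLoc == cv::Point(3, 2) (x = column, y = row)\n@endcode\n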
@param src input single-channel array.\n@param minVal pointer to the returned minimum value; NULL is used if not required.\n@param maxVal pointer to the returned maximum value; NULL is used if not required.\n@param minLoc pointer to the returned minimum location (in 2D case); NULL is used if not required.\n@param maxLoc pointer to the returned maximum location (in 2D case); NULL is used if not required.\n@param mask optional mask used to select a sub-array.\n@sa max, min, compare, inRange, extractImageCOI, mixChannels, split, Mat::reshape\n*/\nCV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal,\n                            CV_OUT double* maxVal = 0, CV_OUT Point* minLoc = 0,\n                            CV_OUT Point* maxLoc = 0, InputArray mask = noArray());\n\n\n/** @brief Finds the global minimum and maximum in an array\n\nThe function minMaxIdx finds the minimum and maximum element values and their positions. The\nextrema are searched across the whole array or, if mask is not an empty array, in the specified\narray region. The function does not work with multi-channel arrays. If you need to find minimum or\nmaximum elements across all the channels, use Mat::reshape first to reinterpret the array as\nsingle-channel. Or you may extract the particular channel using either extractImageCOI , or\nmixChannels , or split . In case of a sparse matrix, the minimum is found among non-zero elements\nonly.\n@note When minIdx is not NULL, it must have at least 2 elements (as well as maxIdx), even if src is\na single-row or single-column matrix. In OpenCV (following MATLAB) each array has at least 2\ndimensions, i.e. single-column matrix is Mx1 matrix (and therefore minIdx/maxIdx will be\n(i1,0)/(i2,0)) and single-row matrix is 1xN matrix (and therefore minIdx/maxIdx will be\n(0,j1)/(0,j2)).\n@param src input single-channel array.\n@param minVal pointer to the returned minimum value; NULL is used if not required.\n@param maxVal pointer to the returned maximum value; NULL is used if not required.\n@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required;\notherwise, it must point to an array of src.dims elements, and the coordinates of the minimum element\nin each dimension are stored there sequentially.\n@param maxIdx pointer to the returned maximum location (in nD case). NULL is used if not required.\n@param mask specified array region\n*/\nCV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal = 0,\n                          int* minIdx = 0, int* maxIdx = 0, InputArray mask = noArray());\n\n/** @overload\n@param a input single-channel array.\n@param minVal pointer to the returned minimum value; NULL is used if not required.\n@param maxVal pointer to the returned maximum value; NULL is used if not required.\n@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required;\notherwise, it must point to an array of src.dims elements, and the coordinates of the minimum element\nin each dimension are stored there sequentially.\n@param maxIdx pointer to the returned maximum location (in nD case). 
NULL is used if not required.\n*/\nCV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal,\n                          double* maxVal, int* minIdx = 0, int* maxIdx = 0);\n\n/** @brief Reduces a matrix to a vector.\n\nThe function reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of\n1D vectors and performing the specified operation on the vectors until a single row/column is\nobtained. For example, the function can be used to compute horizontal and vertical projections of a\nraster image. In case of REDUCE_SUM and REDUCE_AVG , the output may have a larger element\nbit-depth to preserve accuracy. Multi-channel arrays are also supported in these two reduction\nmodes.\n@param src input 2D matrix.\n@param dst output vector. Its size and type are defined by dim and dtype parameters.\n@param dim dimension index along which the matrix is reduced. 0 means that the matrix is reduced to\na single row. 1 means that the matrix is reduced to a single column.\n@param rtype reduction operation that could be one of cv::ReduceTypes\n@param dtype when negative, the output vector will have the same type as the input matrix,\notherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()).\n@sa repeat\n*/\nCV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype = -1);\n\n/** @brief Creates one multi-channel array out of several single-channel ones.\n\nThe function merge merges several arrays to make a single multi-channel array. That is, each\nelement of the output array will be a concatenation of the elements of the input arrays, where\nelements of i-th input array are treated as mv[i].channels()-element vectors.\n\nThe function cv::split does the reverse operation. If you need to shuffle channels in some other\nadvanced way, use cv::mixChannels.\n@param mv input array of matrices to be merged; all the matrices in mv must have the same\nsize and the same depth.\n@param count number of input matrices when mv is a plain C array; it must be greater than zero.\n@param dst output array of the same size and the same depth as mv[0]; the number of channels will\nbe equal to the parameter count.\n@sa  mixChannels, split, Mat::reshape\n*/\nCV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst);\n\n/** @overload\n@param mv input vector of matrices to be merged; all the matrices in mv must have the same\nsize and the same depth.\n@param dst output array of the same size and the same depth as mv[0]; the number of channels will\nbe the total number of channels in the matrix array.\n  */\nCV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst);\n\n/** @brief Divides a multi-channel array into several single-channel arrays.\n\nThe functions split split a multi-channel array into separate single-channel arrays:\n\\f[\\texttt{mv} [c](I) =  \\texttt{src} (I)_c\\f]\nIf you need to extract a single channel or do some other sophisticated channel permutation, use\nmixChannels .\n
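As a quick sketch, a split-and-merge round trip (the image is a placeholder):\n@code{.cpp}\n    cv::Mat bgr(10, 10, CV_8UC3, cv::Scalar(255, 0, 0));\n    std::vector<cv::Mat> planes;\n    cv::split(bgr, planes);     // planes[0..2] are the single-channel B, G, R planes\n    cv::Mat merged;\n    cv::merge(planes, merged);  // reassembles the original 3-channel image\n@endcode\n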
@param src input multi-channel array.\n@param mvbegin output array; the number of arrays must match src.channels(); the arrays themselves are\nreallocated, if needed.\n@sa merge, mixChannels, cvtColor\n*/\nCV_EXPORTS void split(const Mat& src, Mat* mvbegin);\n\n/** @overload\n@param m input multi-channel array.\n@param mv output vector of arrays; the arrays themselves are reallocated, if needed.\n*/\nCV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv);\n\n/** @brief Copies specified channels from input arrays to the specified channels of\noutput arrays.\n\nThe function cv::mixChannels provides an advanced mechanism for shuffling image channels.\n\ncv::split, cv::merge, and some forms of cv::cvtColor are special cases of cv::mixChannels .\n\nIn the example below, the code splits a 4-channel BGRA image into a 3-channel BGR (with B and R\nchannels swapped) and a separate alpha-channel image:\n@code{.cpp}\n    Mat bgra( 100, 100, CV_8UC4, Scalar(255,0,0,255) );\n    Mat bgr( bgra.rows, bgra.cols, CV_8UC3 );\n    Mat alpha( bgra.rows, bgra.cols, CV_8UC1 );\n\n    // forming an array of matrices is a quite efficient operation,\n    // because the matrix data is not copied, only the headers\n    Mat out[] = { bgr, alpha };\n    // bgra[0] -> bgr[2], bgra[1] -> bgr[1],\n    // bgra[2] -> bgr[0], bgra[3] -> alpha[0]\n    int from_to[] = { 0,2, 1,1, 2,0, 3,3 };\n    mixChannels( &bgra, 1, out, 2, from_to, 4 );\n@endcode\n@note Unlike many other new-style C++ functions in OpenCV (see the introduction section and\nMat::create ), cv::mixChannels requires the output arrays to be pre-allocated before calling the\nfunction.\n@param src input array or vector of matrices; all of the matrices must have the same size and the\nsame depth.\n@param nsrcs number of matrices in `src`.\n@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and\ndepth must be the same as in `src[0]`.\n@param ndsts number of matrices in `dst`.\n@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\\*2] is\na 0-based index of the input channel in src, fromTo[k\\*2+1] is an index of the output channel in\ndst; the continuous channel numbering is used: the first input image channels are indexed from 0 to\nsrc[0].channels()-1, the second input image channels are indexed from src[0].channels() to\nsrc[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image\nchannels; as a special case, when fromTo[k\\*2] is negative, the corresponding output channel is\nfilled with zero .\n@param npairs number of index pairs in `fromTo`.\n@sa cv::split, cv::merge, cv::cvtColor\n*/\nCV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts,\n                            const int* fromTo, size_t npairs);\n\n/** @overload\n@param src input array or vector of matrices; all of the matrices must have the same size and the\nsame depth.\n@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and\ndepth must be the same as in src[0].\n@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\\*2] is\na 0-based index of the input channel in src, fromTo[k\\*2+1] is an index of the output channel in\ndst; the continuous channel numbering is used: the first input image channels are indexed from 0 to\nsrc[0].channels()-1, the second input image channels are indexed from src[0].channels() to\nsrc[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image\nchannels; as a special case, when fromTo[k\\*2] is negative, the corresponding output channel is\nfilled with zero .\n@param npairs number of index pairs in fromTo.\n*/\nCV_EXPORTS void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,\n                            const int* fromTo, size_t npairs);\n\n/** @overload\n@param src input array or vector of matrices; all of the matrices must have the same size and the\nsame depth.\n@param dst output array or vector of matrices; all the 
matrices **must be allocated**; their size and\ndepth must be the same as in src[0].\n@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\\*2] is\na 0-based index of the input channel in src, fromTo[k\\*2+1] is an index of the output channel in\ndst; the continuous channel numbering is used: the first input image channels are indexed from 0 to\nsrc[0].channels()-1, the second input image channels are indexed from src[0].channels() to\nsrc[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image\nchannels; as a special case, when fromTo[k\\*2] is negative, the corresponding output channel is\nfilled with zero .\n*/\nCV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,\n                              const std::vector<int>& fromTo);\n\n/** @brief extracts a single channel from src (coi is 0-based index)\n@todo document\n*/\nCV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi);\n\n/** @brief inserts a single channel to dst (coi is 0-based index)\n@todo document\n*/\nCV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi);\n\n/** @brief Flips a 2D array around vertical, horizontal, or both axes.\n\nThe function flip flips the array in one of three different ways (row\nand column indices are 0-based):\n\\f[\\texttt{dst} _{ij} =\n\\left\\{\n\\begin{array}{l l}\n\\texttt{src} _{\\texttt{src.rows}-i-1,j} & if\\;  \\texttt{flipCode} = 0 \\\\\n\\texttt{src} _{i, \\texttt{src.cols} -j-1} & if\\;  \\texttt{flipCode} > 0 \\\\\n\\texttt{src} _{ \\texttt{src.rows} -i-1, \\texttt{src.cols} -j-1} & if\\; \\texttt{flipCode} < 0 \\\\\n\\end{array}\n\\right.\\f]\nThe example scenarios of using the function are the following (see also the sketch after this list):\n*   Vertical flipping of the image (flipCode == 0) to switch between\n    top-left and bottom-left image origin. This is a typical operation\n    in video processing on Microsoft Windows\\* OS.\n*   Horizontal flipping of the image with the subsequent horizontal\n    shift and absolute difference calculation to check for a\n    vertical-axis symmetry (flipCode \\> 0).\n*   Simultaneous horizontal and vertical flipping of the image with\n    the subsequent shift and absolute difference calculation to check\n    for a central symmetry (flipCode \\< 0).\n*   Reversing the order of point arrays (flipCode \\> 0 or\n    flipCode == 0).\n
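For the first scenario, a minimal sketch (the image is a placeholder):\n@code{.cpp}\n    cv::Mat img(4, 4, CV_8UC1, cv::Scalar(0));\n    img.row(0).setTo(cv::Scalar(255)); // mark the top row\n    cv::Mat flipped;\n    cv::flip(img, flipped, 0);         // vertical flip: the mark moves to the bottom row\n@endcode\n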
@param src input array.\n@param dst output array of the same size and type as src.\n@param flipCode a flag to specify how to flip the array; 0 means\nflipping around the x-axis and positive value (for example, 1) means\nflipping around y-axis. Negative value (for example, -1) means flipping\naround both axes.\n@sa transpose , repeat , completeSymm\n*/\nCV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode);\n\n/** @brief Fills the output array with repeated copies of the input array.\n\nThe functions repeat duplicate the input array one or more times along each of the two axes:\n\\f[\\texttt{dst} _{ij}= \\texttt{src} _{i\\mod src.rows, \\; j\\mod src.cols }\\f]\nThe second variant of the function is more convenient to use with @ref MatrixExpressions.\n@param src input array to replicate.\n@param dst output array of the same type as src.\n@param ny Flag to specify how many times the src is repeated along the\nvertical axis.\n@param nx Flag to specify how many times the src is repeated along the\nhorizontal axis.\n@sa reduce\n*/\nCV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst);\n\n/** @overload\n@param src input array to replicate.\n@param ny Flag to specify how many times the src is repeated along the\nvertical axis.\n@param nx Flag to specify how many times the src is repeated along the\nhorizontal axis.\n  */\nCV_EXPORTS Mat repeat(const Mat& src, int ny, int nx);\n\n/** @brief Applies horizontal concatenation to given matrices.\n\nThe function horizontally concatenates two or more cv::Mat matrices (with the same number of rows).\n@code{.cpp}\n    cv::Mat matArray[] = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),\n                           cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),\n                           cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};\n\n    cv::Mat out;\n    cv::hconcat( matArray, 3, out );\n    //out:\n    //[1, 2, 3;\n    // 1, 2, 3;\n    // 1, 2, 3;\n    // 1, 2, 3]\n@endcode\n@param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.\n@param nsrc number of matrices in src.\n@param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.\n@sa cv::vconcat(const Mat*, size_t, OutputArray), cv::vconcat(InputArrayOfArrays, OutputArray) and cv::vconcat(InputArray, InputArray, OutputArray)\n*/\nCV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst);\n/** @overload\n @code{.cpp}\n    cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 4,\n                                                  2, 5,\n                                                  3, 6);\n    cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 7, 10,\n                                                  8, 11,\n                                                  9, 12);\n\n    cv::Mat C;\n    cv::hconcat(A, B, C);\n    //C:\n    //[1, 4, 7, 10;\n    // 2, 5, 8, 11;\n    // 3, 6, 9, 12]\n @endcode\n @param src1 first input array to be considered for horizontal concatenation.\n @param src2 second input array to be considered for horizontal concatenation.\n @param dst output array. 
It has the same number of rows and depth as the src1 and src2, and the sum of cols of the src1 and src2.\n */\nCV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst);\n/** @overload\n @code{.cpp}\n    std::vector<cv::Mat> matrices = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),\n                                      cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),\n                                      cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};\n\n    cv::Mat out;\n    cv::hconcat( matrices, out );\n    //out:\n    //[1, 2, 3;\n    // 1, 2, 3;\n    // 1, 2, 3;\n    // 1, 2, 3]\n @endcode\n @param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.\n @param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.\n */\nCV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst);\n\n/** @brief Applies vertical concatenation to given matrices.\n\nThe function vertically concatenates two or more cv::Mat matrices (with the same number of cols).\n@code{.cpp}\n    cv::Mat matArray[] = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),\n                           cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),\n                           cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};\n\n    cv::Mat out;\n    cv::vconcat( matArray, 3, out );\n    //out:\n    //[1,   1,   1,   1;\n    // 2,   2,   2,   2;\n    // 3,   3,   3,   3]\n@endcode\n@param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.\n@param nsrc number of matrices in src.\n@param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.\n@sa cv::hconcat(const Mat*, size_t, OutputArray), cv::hconcat(InputArrayOfArrays, OutputArray) and cv::hconcat(InputArray, InputArray, OutputArray)\n*/\nCV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst);\n/** @overload\n @code{.cpp}\n    cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 7,\n                                                  2, 8,\n                                                  3, 9);\n    cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 4, 10,\n                                                  5, 11,\n                                                  6, 12);\n\n    cv::Mat C;\n    cv::vconcat(A, B, C);\n    //C:\n    //[1, 7;\n    // 2, 8;\n    // 3, 9;\n    // 4, 10;\n    // 5, 11;\n    // 6, 12]\n @endcode\n @param src1 first input array to be considered for vertical concatenation.\n @param src2 second input array to be considered for vertical concatenation.\n @param dst output array. It has the same number of cols and depth as the src1 and src2, and the sum of rows of the src1 and src2.\n */\nCV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst);\n/** @overload\n @code{.cpp}\n    std::vector<cv::Mat> matrices = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),\n                                      cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),\n                                      cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};\n\n    cv::Mat out;\n    cv::vconcat( matrices, out );\n    //out:\n    //[1,   1,   1,   1;\n    // 2,   2,   2,   2;\n    // 3,   3,   3,   3]\n @endcode\n @param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.\n @param dst output array. 
It has the same number of cols and depth as the src, and the sum of rows of the src.\n */\nCV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);\n\n/** @brief Calculates the per-element bit-wise conjunction of two arrays or an\narray and a scalar (dst = src1 & src2).\n\nThe function calculates the per-element bit-wise logical conjunction for:\n*   Two arrays when src1 and src2 have the same size:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1} (I)  \\wedge \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n*   An array and a scalar when src2 is constructed from Scalar or has\n    the same number of elements as `src1.channels()`:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1} (I)  \\wedge \\texttt{src2} \\quad \\texttt{if mask} (I) \\ne0\\f]\n*   A scalar and an array when src1 is constructed from Scalar or has\n    the same number of elements as `src2.channels()`:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1}  \\wedge \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\nIn case of floating-point arrays, their machine-specific bit\nrepresentations (usually IEEE754-compliant) are used for the operation.\nIn case of multi-channel arrays, each channel is processed\nindependently. In the second and third cases above, the scalar is first\nconverted to the array type.\n
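For example, a tiny masking sketch (values are placeholders):\n@code{.cpp}\n    cv::Mat img(4, 4, CV_8UC1, cv::Scalar(0xAA));\n    cv::Mat bits(4, 4, CV_8UC1, cv::Scalar(0x0F));\n    cv::Mat low;\n    cv::bitwise_and(img, bits, low); // keeps only the low nibble of each pixel (0x0A)\n@endcode\n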
@param src1 first input array or a scalar.\n@param src2 second input array or a scalar.\n@param dst output array that has the same size and type as the input\narrays.\n@param mask optional operation mask, 8-bit single channel array, that\nspecifies elements of the output array to be changed.\n*/\nCV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2,\n                              OutputArray dst, InputArray mask = noArray());\n\n/** @brief Calculates the per-element bit-wise disjunction of two arrays or an\narray and a scalar.\n\nThe function calculates the per-element bit-wise logical disjunction for:\n*   Two arrays when src1 and src2 have the same size:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1} (I)  \\vee \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n*   An array and a scalar when src2 is constructed from Scalar or has\n    the same number of elements as `src1.channels()`:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1} (I)  \\vee \\texttt{src2} \\quad \\texttt{if mask} (I) \\ne0\\f]\n*   A scalar and an array when src1 is constructed from Scalar or has\n    the same number of elements as `src2.channels()`:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1}  \\vee \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\nIn case of floating-point arrays, their machine-specific bit\nrepresentations (usually IEEE754-compliant) are used for the operation.\nIn case of multi-channel arrays, each channel is processed\nindependently. In the second and third cases above, the scalar is first\nconverted to the array type.\n@param src1 first input array or a scalar.\n@param src2 second input array or a scalar.\n@param dst output array that has the same size and type as the input\narrays.\n@param mask optional operation mask, 8-bit single channel array, that\nspecifies elements of the output array to be changed.\n*/\nCV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2,\n                             OutputArray dst, InputArray mask = noArray());\n\n/** @brief Calculates the per-element bit-wise \"exclusive or\" operation on two\narrays or an array and a scalar.\n\nThe function calculates the per-element bit-wise logical \"exclusive-or\"\noperation for:\n*   Two arrays when src1 and src2 have the same size:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1} (I)  \\oplus \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n*   An array and a scalar when src2 is constructed from Scalar or has\n    the same number of elements as `src1.channels()`:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1} (I)  \\oplus \\texttt{src2} \\quad \\texttt{if mask} (I) \\ne0\\f]\n*   A scalar and an array when src1 is constructed from Scalar or has\n    the same number of elements as `src2.channels()`:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1}  \\oplus \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\nIn case of floating-point arrays, their machine-specific bit\nrepresentations (usually IEEE754-compliant) are used for the operation.\nIn case of multi-channel arrays, each channel is processed\nindependently. In the second and third cases above, the scalar is first\nconverted to the array type.\n@param src1 first input array or a scalar.\n@param src2 second input array or a scalar.\n@param dst output array that has the same size and type as the input\narrays.\n@param mask optional operation mask, 8-bit single channel array, that\nspecifies elements of the output array to be changed.\n*/\nCV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2,\n                              OutputArray dst, InputArray mask = noArray());\n\n/** @brief  Inverts every bit of an array.\n\nThe function calculates per-element bit-wise inversion of the input\narray:\n\\f[\\texttt{dst} (I) =  \\neg \\texttt{src} (I)\\f]\nIn case of a floating-point input array, its machine-specific bit\nrepresentation (usually IEEE754-compliant) is used for the operation. 
In\ncase of multi-channel arrays, each channel is processed independently.\n@param src input array.\n@param dst output array that has the same size and type as the input\narray.\n@param mask optional operation mask, 8-bit single channel array, that\nspecifies elements of the output array to be changed.\n*/\nCV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst,\n                              InputArray mask = noArray());\n\n/** @brief Calculates the per-element absolute difference between two arrays or between an array and a scalar.\n\nThe function absdiff calculates:\n*   Absolute difference between two arrays when they have the same\n    size and type:\n    \\f[\\texttt{dst}(I) =  \\texttt{saturate} (| \\texttt{src1}(I) -  \\texttt{src2}(I)|)\\f]\n*   Absolute difference between an array and a scalar when the second\n    array is constructed from Scalar or has as many elements as the\n    number of channels in `src1`:\n    \\f[\\texttt{dst}(I) =  \\texttt{saturate} (| \\texttt{src1}(I) -  \\texttt{src2} |)\\f]\n*   Absolute difference between a scalar and an array when the first\n    array is constructed from Scalar or has as many elements as the\n    number of channels in `src2`:\n    \\f[\\texttt{dst}(I) =  \\texttt{saturate} (| \\texttt{src1} -  \\texttt{src2}(I) |)\\f]\n    where I is a multi-dimensional index of array elements. In case of\n    multi-channel arrays, each channel is processed independently.\n@note Saturation is not applied when the arrays have the depth CV_32S.\nYou may even get a negative value in the case of overflow.\n@param src1 first input array or a scalar.\n@param src2 second input array or a scalar.\n@param dst output array that has the same size and type as input arrays.\n@sa cv::abs(const Mat&)\n*/\nCV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst);\n\n/** @brief  Checks if array elements lie between the elements of two other arrays.\n\nThe function checks the range as follows:\n-   For every element of a single-channel input array:\n    \\f[\\texttt{dst} (I)= \\texttt{lowerb} (I)_0  \\leq \\texttt{src} (I)_0 \\leq  \\texttt{upperb} (I)_0\\f]\n-   For two-channel arrays:\n    \\f[\\texttt{dst} (I)= \\texttt{lowerb} (I)_0  \\leq \\texttt{src} (I)_0 \\leq  \\texttt{upperb} (I)_0  \\land \\texttt{lowerb} (I)_1  \\leq \\texttt{src} (I)_1 \\leq  \\texttt{upperb} (I)_1\\f]\n-   and so forth.\n\nThat is, dst (I) is set to 255 (all 1 -bits) if src (I) is within the\nspecified 1D, 2D, 3D, ... box and 0 otherwise.\n\nWhen the lower and/or upper boundary parameters are scalars, the indexes\n(I) at lowerb and upperb in the above formulas should be omitted.\n
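For instance, a short sketch with scalar bounds (the image and bounds are placeholders):\n@code{.cpp}\n    cv::Mat img(4, 4, CV_8UC3, cv::Scalar(30, 40, 50));\n    cv::Mat inBand;\n    cv::inRange(img, cv::Scalar(0, 0, 0), cv::Scalar(100, 100, 100), inBand);\n    // inBand is CV_8U: 255 where all three channels fall inside the box, 0 elsewhere\n@endcode\n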
@param src first input array.\n@param lowerb inclusive lower boundary array or a scalar.\n@param upperb inclusive upper boundary array or a scalar.\n@param dst output array of the same size as src and CV_8U type.\n*/\nCV_EXPORTS_W void inRange(InputArray src, InputArray lowerb,\n                          InputArray upperb, OutputArray dst);\n\n/** @brief Performs the per-element comparison of two arrays or an array and scalar value.\n\nThe function compares:\n*   Elements of two arrays when src1 and src2 have the same size:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1} (I)  \\,\\texttt{cmpop}\\, \\texttt{src2} (I)\\f]\n*   Elements of src1 with a scalar src2 when src2 is constructed from\n    Scalar or has a single element:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1}(I) \\,\\texttt{cmpop}\\,  \\texttt{src2}\\f]\n*   src1 with elements of src2 when src1 is constructed from Scalar or\n    has a single element:\n    \\f[\\texttt{dst} (I) =  \\texttt{src1}  \\,\\texttt{cmpop}\\, \\texttt{src2} (I)\\f]\nWhen the comparison result is true, the corresponding element of output\narray is set to 255. The comparison operations can be replaced with the\nequivalent matrix expressions:\n@code{.cpp}\n    Mat dst1 = src1 >= src2;\n    Mat dst2 = src1 < 8;\n    ...\n@endcode\n@param src1 first input array or a scalar; when it is an array, it must have a single channel.\n@param src2 second input array or a scalar; when it is an array, it must have a single channel.\n@param dst output array of type CV_8U that has the same size and the same number of channels as\n    the input arrays.\n@param cmpop a flag that specifies correspondence between the arrays (cv::CmpTypes)\n@sa checkRange, min, max, threshold\n*/\nCV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop);\n\n/** @brief Calculates per-element minimum of two arrays or an array and a scalar.\n\nThe functions min calculate the per-element minimum of two arrays:\n\\f[\\texttt{dst} (I)= \\min ( \\texttt{src1} (I), \\texttt{src2} (I))\\f]\nor array and a scalar:\n\\f[\\texttt{dst} (I)= \\min ( \\texttt{src1} (I), \\texttt{value} )\\f]\n@param src1 first input array.\n@param src2 second input array of the same size and type as src1.\n@param dst output array of the same size and type as src1.\n@sa max, compare, inRange, minMaxLoc\n*/\nCV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst);\n/** @overload\nneeded to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare)\n*/\nCV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst);\n/** @overload\nneeded to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare)\n*/\nCV_EXPORTS void min(const UMat& src1, const UMat& src2, UMat& dst);\n\n/** @brief Calculates per-element maximum of two arrays or an array and a scalar.\n\nThe functions max calculate the per-element maximum of two arrays:\n\\f[\\texttt{dst} (I)= \\max ( \\texttt{src1} (I), \\texttt{src2} (I))\\f]\nor array and a scalar:\n\\f[\\texttt{dst} (I)= \\max ( \\texttt{src1} (I), \\texttt{value} )\\f]\n@param src1 first input array.\n@param src2 second input array of the same size and type as src1 .\n@param dst output array of the same size and type as src1.\n@sa  min, compare, inRange, minMaxLoc, @ref MatrixExpressions\n*/\nCV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray 
dst);\n/** @overload\nneeded to avoid conflicts with const _Tp& std::max(const _Tp&, const _Tp&, _Compare)\n*/\nCV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst);\n/** @overload\nneeded to avoid conflicts with const _Tp& std::max(const _Tp&, const _Tp&, _Compare)\n*/\nCV_EXPORTS void max(const UMat& src1, const UMat& src2, UMat& dst);\n\n/** @brief Calculates a square root of array elements.\n\nThe functions sqrt calculate a square root of each input array element.\nIn case of multi-channel arrays, each channel is processed\nindependently. The accuracy is approximately the same as of the built-in\nstd::sqrt .\n@param src input floating-point array.\n@param dst output array of the same size and type as src.\n*/\nCV_EXPORTS_W void sqrt(InputArray src, OutputArray dst);\n\n/** @brief Raises every array element to a power.\n\nThe function pow raises every element of the input array to power :\n\\f[\\texttt{dst} (I) =  \\fork{\\texttt{src}(I)^{power}}{if \\(\\texttt{power}\\) is integer}{|\\texttt{src}(I)|^{power}}{otherwise}\\f]\n\nSo, for a non-integer power exponent, the absolute values of input array\nelements are used. However, it is possible to get true values for\nnegative values using some extra operations. The example below\ncomputes the 5th root of array src:\n@code{.cpp}\n    Mat mask = src < 0;\n    pow(src, 1./5, dst);\n    subtract(Scalar::all(0), dst, dst, mask);\n@endcode\nFor some values of power, such as integer values, 0.5 and -0.5,\nspecialized faster algorithms are used.\n\nSpecial values (NaN, Inf) are not handled.\n@param src input array.\n@param power exponent of power.\n@param dst output array of the same size and type as src.\n@sa sqrt, exp, log, cartToPolar, polarToCart\n*/\nCV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst);\n\n/** @brief Calculates the exponent of every array element.\n\nThe function exp calculates the exponent of every element of the input\narray:\n\\f[\\texttt{dst} [I] = e^{ src(I) }\\f]\n\nThe maximum relative error is about 7e-6 for single-precision input and\nless than 1e-10 for double-precision input. Currently, the function\nconverts denormalized values to zeros on output. Special values (NaN,\nInf) are not handled.\n@param src input array.\n@param dst output array of the same size and type as src.\n@sa log , cartToPolar , polarToCart , phase , pow , sqrt , magnitude\n*/\nCV_EXPORTS_W void exp(InputArray src, OutputArray dst);\n\n/** @brief Calculates the natural logarithm of every array element.\n\nThe function log calculates the natural logarithm of the absolute value\nof every element of the input array:\n\\f[\\texttt{dst} (I) =  \\fork{\\log |\\texttt{src}(I)|}{if \\(\\texttt{src}(I) \\ne 0\\) }{\\texttt{C}}{otherwise}\\f]\n\nwhere C is a large negative number (about -700 in the current\nimplementation). 
The maximum relative error is about 7e-6 for\nsingle-precision input and less than 1e-10 for double-precision input.\nSpecial values (NaN, Inf) are not handled.\n@param src input array.\n@param dst output array of the same size and type as src .\n@sa exp, cartToPolar, polarToCart, phase, pow, sqrt, magnitude\n*/\nCV_EXPORTS_W void log(InputArray src, OutputArray dst);\n\n/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.\n\nThe function polarToCart calculates the Cartesian coordinates of each 2D\nvector represented by the corresponding elements of magnitude and angle:\n\\f[\\begin{array}{l} \\texttt{x} (I) =  \\texttt{magnitude} (I) \\cos ( \\texttt{angle} (I)) \\\\ \\texttt{y} (I) =  \\texttt{magnitude} (I) \\sin ( \\texttt{angle} (I)) \\\\ \\end{array}\\f]\n\nThe relative accuracy of the estimated coordinates is about 1e-6.\n@param magnitude input floating-point array of magnitudes of 2D vectors;\nit can be an empty matrix (=Mat()), in this case, the function assumes\nthat all the magnitudes are =1; if it is not empty, it must have the\nsame size and type as angle.\n@param angle input floating-point array of angles of 2D vectors.\n@param x output array of x-coordinates of 2D vectors; it has the same\nsize and type as angle.\n@param y output array of y-coordinates of 2D vectors; it has the same\nsize and type as angle.\n@param angleInDegrees when true, the input angles are measured in\ndegrees, otherwise, they are measured in radians.\n@sa cartToPolar, magnitude, phase, exp, log, pow, sqrt\n*/\nCV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle,\n                              OutputArray x, OutputArray y, bool angleInDegrees = false);\n\n/** @brief Calculates the magnitude and angle of 2D vectors.\n\nThe function cartToPolar calculates either the magnitude, angle, or both\nfor every 2D vector (x(I),y(I)):\n\\f[\\begin{array}{l} \\texttt{magnitude} (I)= \\sqrt{\\texttt{x}(I)^2+\\texttt{y}(I)^2} , \\\\ \\texttt{angle} (I)= \\texttt{atan2} ( \\texttt{y} (I), \\texttt{x} (I))[ \\cdot180 / \\pi ] \\end{array}\\f]\n\nThe angles are calculated with an accuracy of about 0.3 degrees. For the point\n(0,0), the angle is set to 0.\n
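For example, a minimal round-trip sketch (the arrays are placeholders):\n@code{.cpp}\n    cv::Mat x(1, 4, CV_32FC1, cv::Scalar(3));\n    cv::Mat y(1, 4, CV_32FC1, cv::Scalar(4));\n    cv::Mat mag, ang;\n    cv::cartToPolar(x, y, mag, ang); // mag == 5, ang == atan2(4, 3) in radians\n    cv::polarToCart(mag, ang, x, y); // recovers x and y up to the stated accuracy\n@endcode\n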
@param x array of x-coordinates; this must be a single-precision or\ndouble-precision floating-point array.\n@param y array of y-coordinates, that must have the same size and same type as x.\n@param magnitude output array of magnitudes of the same size and type as x.\n@param angle output array of angles that has the same size and type as\nx; the angles are measured in radians (from 0 to 2\\*Pi) or in degrees (0 to 360 degrees).\n@param angleInDegrees a flag, indicating whether the angles are measured\nin radians (which is by default), or in degrees.\n@sa Sobel, Scharr\n*/\nCV_EXPORTS_W void cartToPolar(InputArray x, InputArray y,\n                              OutputArray magnitude, OutputArray angle,\n                              bool angleInDegrees = false);\n\n/** @brief Calculates the rotation angle of 2D vectors.\n\nThe function phase calculates the rotation angle of each 2D vector that\nis formed from the corresponding elements of x and y :\n\\f[\\texttt{angle} (I) =  \\texttt{atan2} ( \\texttt{y} (I), \\texttt{x} (I))\\f]\n\nThe angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0 ,\nthe corresponding angle(I) is set to 0.\n@param x input floating-point array of x-coordinates of 2D vectors.\n@param y input array of y-coordinates of 2D vectors; it must have the\nsame size and the same type as x.\n@param angle output array of vector angles; it has the same size and\nsame type as x .\n@param angleInDegrees when true, the function calculates the angle in\ndegrees; otherwise, the angles are measured in radians.\n*/\nCV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle,\n                        bool angleInDegrees = false);\n\n/** @brief Calculates the magnitude of 2D vectors.\n\nThe function magnitude calculates the magnitude of 2D vectors formed\nfrom the corresponding elements of x and y arrays:\n\\f[\\texttt{dst} (I) =  \\sqrt{\\texttt{x}(I)^2 + \\texttt{y}(I)^2}\\f]\n@param x floating-point array of x-coordinates of the vectors.\n@param y floating-point array of y-coordinates of the vectors; it must\nhave the same size as x.\n@param magnitude output array of the same size and type as x.\n@sa cartToPolar, polarToCart, phase, sqrt\n*/\nCV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude);\n\n/** @brief Checks every element of an input array for invalid values.\n\nThe functions checkRange check that every array element is neither NaN nor infinite. When minVal \\>\n-DBL_MAX and maxVal \\< DBL_MAX, the functions also check that each value is between minVal and\nmaxVal. In case of multi-channel arrays, each channel is processed independently. If some values\nare out of range, position of the first outlier is stored in pos (when pos != NULL). Then, the\nfunctions either return false (when quiet=true) or throw an exception.\n@param a input array.\n@param quiet a flag, indicating whether the functions quietly return false when the array elements\nare out of range or they throw an exception.\n@param pos optional output parameter, when not NULL, must be a pointer to array of src.dims\nelements.\n@param minVal inclusive lower boundary of valid values range.\n@param maxVal exclusive upper boundary of valid values range.\n*/\nCV_EXPORTS_W bool checkRange(InputArray a, bool quiet = true, CV_OUT Point* pos = 0,\n                            double minVal = -DBL_MAX, double maxVal = DBL_MAX);\n\n/** @brief converts NaN's to the given number\n*/\nCV_EXPORTS_W void patchNaNs(InputOutputArray a, double val = 0);\n\n/** @brief Performs generalized matrix multiplication.\n\nThe function performs generalized matrix multiplication similar to the\ngemm functions in BLAS level 3. For example,\n`gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)`\ncorresponds to\n\\f[\\texttt{dst} =  \\texttt{alpha} \\cdot \\texttt{src1} ^T  \\cdot \\texttt{src2} +  \\texttt{beta} \\cdot \\texttt{src3} ^T\\f]\n\nIn case of complex (two-channel) data, a complex matrix\nmultiplication is performed.\n\nThe function can be replaced with a matrix expression. 
For example, the\nabove call can be replaced with:\n@code{.cpp}\n    dst = alpha*src1.t()*src2 + beta*src3.t();\n@endcode\n@param src1 first multiplied input matrix that could be real (CV_32FC1,\nCV_64FC1) or complex (CV_32FC2, CV_64FC2).\n@param src2 second multiplied input matrix of the same type as src1.\n@param alpha weight of the matrix product.\n@param src3 third optional delta matrix added to the matrix product; it\nshould have the same type as src1 and src2.\n@param beta weight of src3.\n@param dst output matrix; it has the proper size and the same type as\ninput matrices.\n@param flags operation flags (cv::GemmFlags)\n@sa mulTransposed , transform\n*/\nCV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,\n                       InputArray src3, double beta, OutputArray dst, int flags = 0);\n\n/** @brief Calculates the product of a matrix and its transposition.\n\nThe function mulTransposed calculates the product of src and its\ntransposition:\n\\f[\\texttt{dst} = \\texttt{scale} ( \\texttt{src} - \\texttt{delta} )^T ( \\texttt{src} - \\texttt{delta} )\\f]\nif aTa=true , and\n\\f[\\texttt{dst} = \\texttt{scale} ( \\texttt{src} - \\texttt{delta} ) ( \\texttt{src} - \\texttt{delta} )^T\\f]\notherwise. The function is used to calculate the covariance matrix. With\nzero delta, it can be used as a faster substitute for the general matrix\nproduct A\\*B when B=A'.\n@param src input single-channel matrix. Note that unlike gemm, the\nfunction can multiply not only floating-point matrices.\n@param dst output square matrix.\n@param aTa Flag specifying the multiplication ordering. See the\ndescription below.\n@param delta Optional delta matrix subtracted from src before the\nmultiplication. When the matrix is empty ( delta=noArray() ), it is\nassumed to be zero, that is, nothing is subtracted. If it has the same\nsize as src , it is simply subtracted. Otherwise, it is \"repeated\" (see\nrepeat ) to cover the full src and then subtracted. Type of the delta\nmatrix, when it is not empty, must be the same as the type of the created\noutput matrix. See the dtype parameter description below.\n@param scale Optional scale factor for the matrix product.\n@param dtype Optional type of the output matrix. When it is negative,\nthe output matrix will have the same type as src . Otherwise, it will be\ntype=CV_MAT_DEPTH(dtype) that should be either CV_32F or CV_64F .\n@sa calcCovarMatrix, gemm, repeat, reduce\n*/\nCV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,\n                                 InputArray delta = noArray(),\n                                 double scale = 1, int dtype = -1 );\n\n/** @brief Transposes a matrix.\n\nThe function transpose transposes the matrix src :\n\\f[\\texttt{dst} (i,j) =  \\texttt{src} (j,i)\\f]\n@note No complex conjugation is done in case of a complex matrix. 
@param src input single-channel matrix. Note that unlike gemm, the\nfunction can multiply not only floating-point matrices.\n@param dst output square matrix.\n@param aTa Flag specifying the multiplication ordering. See the\ndescription below.\n@param delta Optional delta matrix subtracted from src before the\nmultiplication. When the matrix is empty ( delta=noArray() ), it is\nassumed to be zero, that is, nothing is subtracted. If it has the same\nsize as src , it is simply subtracted. Otherwise, it is \"repeated\" (see\nrepeat ) to cover the full src and then subtracted. Type of the delta\nmatrix, when it is not empty, must be the same as the type of the created\noutput matrix. See the dtype parameter description below.\n@param scale Optional scale factor for the matrix product.\n@param dtype Optional type of the output matrix. When it is negative,\nthe output matrix will have the same type as src . Otherwise, it will be\ntype=CV_MAT_DEPTH(dtype) that should be either CV_32F or CV_64F .\n@sa calcCovarMatrix, gemm, repeat, reduce\n*/\nCV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,\n                                 InputArray delta = noArray(),\n                                 double scale = 1, int dtype = -1 );\n\n/** @brief Transposes a matrix.\n\nThe function transpose transposes the matrix src :\n\\f[\\texttt{dst} (i,j) =  \\texttt{src} (j,i)\\f]\n@note No complex conjugation is done in case of a complex matrix. It\nshould be done separately if needed.\n@param src input array.\n@param dst output array of the same type as src.\n*/\nCV_EXPORTS_W void transpose(InputArray src, OutputArray dst);\n\n/** @brief Performs the matrix transformation of every array element.\n\nThe function transform performs the matrix transformation of every\nelement of the array src and stores the results in dst :\n\\f[\\texttt{dst} (I) =  \\texttt{m} \\cdot \\texttt{src} (I)\\f]\n(when m.cols=src.channels() ), or\n\\f[\\texttt{dst} (I) =  \\texttt{m} \\cdot [ \\texttt{src} (I); 1]\\f]\n(when m.cols=src.channels()+1 )\n\nEvery element of the N -channel array src is interpreted as an N -element\nvector that is transformed using the M x N or M x (N+1) matrix m to an\nM-element vector - the corresponding element of the output array dst .\n\nThe function may be used for geometrical transformation of\nN -dimensional points, arbitrary linear color space transformation (such\nas various kinds of RGB to YUV transforms), shuffling the image\nchannels, and so forth.\n@param src input array that must have as many channels (1 to 4) as\nm.cols or m.cols-1.\n@param dst output array of the same size and depth as src; it has as\nmany channels as m.rows.\n@param m transformation M x N or M x (N+1) floating-point matrix (for example, 2x2 or 2x3).\n@sa perspectiveTransform, getAffineTransform, estimateRigidTransform, warpAffine, warpPerspective\n*/\nCV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m );\n\n/** @brief Performs the perspective matrix transformation of vectors.\n\nThe function perspectiveTransform transforms every element of src by\ntreating it as a 2D or 3D vector, in the following way:\n\\f[(x, y, z)  \\rightarrow (x'/w, y'/w, z'/w)\\f]\nwhere\n\\f[(x', y', z', w') =  \\texttt{mat} \\cdot \\begin{bmatrix} x & y & z & 1  \\end{bmatrix}\\f]\nand\n\\f[w =  \\fork{w'}{if \\(w' \\ne 0\\)}{\\infty}{otherwise}\\f]\n\nHere a 3D vector transformation is shown. In case of a 2D vector\ntransformation, the z component is omitted.\n\n@note The function transforms a sparse set of 2D or 3D vectors. If you\nwant to transform an image using perspective transformation, use\nwarpPerspective . If you have an inverse problem, that is, you want to\ncompute the most probable perspective transformation out of several\npairs of corresponding points, you can use getPerspectiveTransform or\nfindHomography .\n
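A minimal sketch, assuming a simple translation homography for illustration:\n@code{.cpp}\n    // map one 2D point through a 3x3 matrix (here, a pure translation by (1, 2))\n    Mat m = (Mat_<double>(3,3) << 1, 0, 1,  0, 1, 2,  0, 0, 1);\n    std::vector<Point2d> src(1, Point2d(5, 5)), dst;\n    perspectiveTransform(src, dst, m); // dst[0] == (6, 7)\n@endcode\n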
@param src input two-channel or three-channel floating-point array; each\nelement is a 2D/3D vector to be transformed.\n@param dst output array of the same size and type as src.\n@param m 3x3 or 4x4 floating-point transformation matrix.\n@sa  transform, warpPerspective, getPerspectiveTransform, findHomography\n*/\nCV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m );\n\n/** @brief Copies the lower or the upper half of a square matrix to another half.\n\nThe function completeSymm copies the lower half of a square matrix to\nthe other half. The matrix diagonal remains unchanged:\n*   \\f$\\texttt{mtx}_{ij}=\\texttt{mtx}_{ji}\\f$ for \\f$i > j\\f$ if\n    lowerToUpper=false\n*   \\f$\\texttt{mtx}_{ij}=\\texttt{mtx}_{ji}\\f$ for \\f$i < j\\f$ if\n    lowerToUpper=true\n@param mtx input-output floating-point square matrix.\n@param lowerToUpper operation flag; if true, the lower half is copied to\nthe upper half. Otherwise, the upper half is copied to the lower half.\n@sa flip, transpose\n*/\nCV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper = false);\n\n/** @brief Initializes a scaled identity matrix.\n\nThe function setIdentity initializes a scaled identity matrix:\n\\f[\\texttt{mtx} (i,j)= \\fork{\\texttt{value}}{ if \\(i=j\\)}{0}{otherwise}\\f]\n\nThe function can also be emulated using the matrix initializers and the\nmatrix expressions:\n@code\n    Mat A = Mat::eye(4, 3, CV_32F)*5;\n    // A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]\n@endcode\n@param mtx matrix to initialize (not necessarily square).\n@param s value to assign to diagonal elements.\n@sa Mat::zeros, Mat::ones, Mat::setTo, Mat::operator=\n*/\nCV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s = Scalar(1));\n\n/** @brief Returns the determinant of a square floating-point matrix.\n\nThe function determinant calculates and returns the determinant of the\nspecified matrix. For small matrices ( mtx.cols=mtx.rows\\<=3 ), the\ndirect method is used. For larger matrices, the function uses LU\nfactorization with partial pivoting.\n\nFor symmetric positive-definite matrices, it is also possible to use\neigen decomposition to calculate the determinant.\n@param mtx input matrix that must have CV_32FC1 or CV_64FC1 type and\nsquare size.\n@sa trace, invert, solve, eigen, @ref MatrixExpressions\n*/\nCV_EXPORTS_W double determinant(InputArray mtx);\n\n/** @brief Returns the trace of a matrix.\n\nThe function trace returns the sum of the diagonal elements of the\nmatrix mtx .\n\\f[\\mathrm{tr} ( \\texttt{mtx} ) =  \\sum _i  \\texttt{mtx} (i,i)\\f]\n@param mtx input matrix.\n*/\nCV_EXPORTS_W Scalar trace(InputArray mtx);\n\n/** @brief Finds the inverse or pseudo-inverse of a matrix.\n\nThe function invert inverts the matrix src and stores the result in dst.\nWhen the matrix src is singular or non-square, the function calculates\nthe pseudo-inverse matrix (the dst matrix) so that norm(src\\*dst - I) is\nminimal, where I is an identity matrix.\n\nIn case of the DECOMP_LU method, the function returns a non-zero value if\nthe inverse has been successfully calculated and 0 if src is singular.\n\nIn case of the DECOMP_SVD method, the function returns the inverse\ncondition number of src (the ratio of the smallest singular value to the\nlargest singular value) and 0 if src is singular. The SVD method\ncalculates a pseudo-inverse matrix if src is singular.\n\nSimilarly to DECOMP_LU, the method DECOMP_CHOLESKY works only with\nnon-singular square matrices that should also be symmetric and\npositive definite. In this case, the function stores the inverted\nmatrix in dst and returns non-zero. Otherwise, it returns 0.\n\n
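A minimal sketch of the pseudo-inverse use case, assuming illustrative values:\n@code{.cpp}\n    // pseudo-inverse of a non-square matrix via SVD\n    Mat A = (Mat_<double>(3,2) << 1, 0,  0, 1,  1, 1), Ainv;\n    double invCond = invert(A, Ainv, DECOMP_SVD); // Ainv is 2x3; invCond > 0 since A has full rank\n@endcode\n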
@param src input floating-point M x N matrix.\n@param dst output matrix of N x M size and the same type as src.\n@param flags inversion method (cv::DecompTypes)\n@sa solve, SVD\n*/\nCV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags = DECOMP_LU);\n\n/** @brief Solves one or more linear systems or least-squares problems.\n\nThe function solve solves a linear system or least-squares problem (the\nlatter is possible with SVD or QR methods, or by specifying the flag\nDECOMP_NORMAL ):\n\\f[\\texttt{dst} =  \\arg \\min _X \\| \\texttt{src1} \\cdot \\texttt{X} -  \\texttt{src2} \\|\\f]\n\nIf DECOMP_LU or DECOMP_CHOLESKY method is used, the function returns 1\nif src1 (or \\f$\\texttt{src1}^T\\texttt{src1}\\f$ ) is non-singular. Otherwise,\nit returns 0. In the latter case, dst is not valid. Other methods find a\npseudo-solution in case of a singular left-hand side part.\n\n@note If you want to find a unity-norm solution of an under-determined\nsingular system \\f$\\texttt{src1}\\cdot\\texttt{dst}=0\\f$ , the function solve\nwill not do the work. Use SVD::solveZ instead.\n\n@param src1 input matrix on the left-hand side of the system.\n@param src2 input matrix on the right-hand side of the system.\n@param dst output solution.\n@param flags solution (matrix inversion) method (cv::DecompTypes)\n@sa invert, SVD, eigen\n*/\nCV_EXPORTS_W bool solve(InputArray src1, InputArray src2,\n                        OutputArray dst, int flags = DECOMP_LU);\n\n/** @brief Sorts each row or each column of a matrix.\n\nThe function sort sorts each matrix row or each matrix column in\nascending or descending order. So you should pass two operation flags to\nget the desired behaviour. If you want to sort matrix rows or columns\nlexicographically, you can use the STL std::sort generic function with the\nproper comparison predicate.\n\n@param src input single-channel array.\n@param dst output array of the same size and type as src.\n@param flags operation flags, a combination of cv::SortFlags\n@sa sortIdx, randShuffle\n*/\nCV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags);\n\n/** @brief Sorts each row or each column of a matrix.\n\nThe function sortIdx sorts each matrix row or each matrix column in\nascending or descending order. So you should pass two operation flags to\nget the desired behaviour. Instead of reordering the elements themselves, it\nstores the indices of sorted elements in the output array. For example:\n@code\n    Mat A = Mat::eye(3,3,CV_32F), B;\n    sortIdx(A, B, SORT_EVERY_ROW + SORT_ASCENDING);\n    // B will probably contain\n    // (because of equal elements in A some permutations are possible):\n    // [[1, 2, 0], [0, 2, 1], [0, 1, 2]]\n@endcode\n@param src input single-channel array.\n@param dst output integer array of the same size as src.\n@param flags operation flags that could be a combination of cv::SortFlags\n@sa sort, randShuffle\n*/\nCV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags);\n\n/** @brief Finds the real roots of a cubic equation.\n\nThe function solveCubic finds the real roots of a cubic equation:\n-   if coeffs is a 4-element vector:\n\\f[\\texttt{coeffs} [0] x^3 +  \\texttt{coeffs} [1] x^2 +  \\texttt{coeffs} [2] x +  \\texttt{coeffs} [3] = 0\\f]\n-   if coeffs is a 3-element vector:\n\\f[x^3 +  \\texttt{coeffs} [0] x^2 +  \\texttt{coeffs} [1] x +  \\texttt{coeffs} [2] = 0\\f]\n\nThe roots are stored in the roots array.\n
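For instance, a minimal sketch with an equation whose roots are known in advance:\n@code{.cpp}\n    // x^3 - 6x^2 + 11x - 6 = 0 has the roots 1, 2 and 3\n    Mat coeffs = (Mat_<double>(1,4) << 1, -6, 11, -6), roots;\n    int nroots = solveCubic(coeffs, roots); // roots now holds 1, 2 and 3\n@endcode\n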
@param coeffs equation coefficients, an array of 3 or 4 elements.\n@param roots output array of real roots that has 1 or 3 elements.\n*/\nCV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots);\n\n/** @brief Finds the real or complex roots of a polynomial equation.\n\nThe function solvePoly finds real and complex roots of a polynomial equation:\n\\f[\\texttt{coeffs} [n] x^{n} +  \\texttt{coeffs} [n-1] x^{n-1} + ... +  \\texttt{coeffs} [1] x +  \\texttt{coeffs} [0] = 0\\f]\n@param coeffs array of polynomial coefficients.\n@param roots output (complex) array of roots.\n@param maxIters maximum number of iterations the algorithm performs.\n*/\nCV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters = 300);\n\n/** @brief Calculates eigenvalues and eigenvectors of a symmetric matrix.\n\nThe functions eigen calculate just eigenvalues, or eigenvalues and eigenvectors of the symmetric\nmatrix src:\n@code\n    src*eigenvectors.row(i).t() = eigenvalues.at<srcType>(i)*eigenvectors.row(i).t()\n@endcode\n@note In the new and the old interfaces, a different ordering of the eigenvalues and eigenvectors\nparameters is used.\n@param src input matrix that must have CV_32FC1 or CV_64FC1 type, square size and be symmetric\n(src ^T^ == src).\n@param eigenvalues output vector of eigenvalues of the same type as src; the eigenvalues are stored\nin the descending order.\n@param eigenvectors output matrix of eigenvectors; it has the same size and type as src; the\neigenvectors are stored as subsequent matrix rows, in the same order as the corresponding\neigenvalues.\n@sa completeSymm , PCA\n*/\nCV_EXPORTS_W bool eigen(InputArray src, OutputArray eigenvalues,\n                        OutputArray eigenvectors = noArray());\n\n/** @brief Calculates the covariance matrix of a set of vectors.\n\nThe functions calcCovarMatrix calculate the covariance matrix and, optionally, the mean vector of\nthe set of input vectors.\n@param samples samples stored as separate matrices\n@param nsamples number of samples\n@param covar output covariance matrix of the type ctype and square size.\n@param mean input or output (depending on the flags) array as the average value of the input vectors.\n@param flags operation flags as a combination of cv::CovarFlags\n@param ctype type of the matrix; it equals 'CV_64F' by default.\n@sa PCA, mulTransposed, Mahalanobis\n@todo InputArrayOfArrays\n*/\nCV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean,\n                                 int flags, int ctype = CV_64F);\n\n/** @overload\n@note use cv::COVAR_ROWS or cv::COVAR_COLS flag\n
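A minimal sketch, assuming samples stored as matrix rows:\n@code{.cpp}\n    Mat samples = (Mat_<float>(3,2) << 0, 0,  1, 0,  0, 1), covar, mean;\n    calcCovarMatrix(samples, covar, mean,\n                    COVAR_NORMAL | COVAR_ROWS | COVAR_SCALE); // covar is 2x2 CV_64F, mean is 1x2\n@endcode\n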
@param samples samples stored as rows/columns of a single matrix.\n@param covar output covariance matrix of the type ctype and square size.\n@param mean input or output (depending on the flags) array as the average value of the input vectors.\n@param flags operation flags as a combination of cv::CovarFlags\n@param ctype type of the matrix; it equals 'CV_64F' by default.\n*/\nCV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar,\n                                   InputOutputArray mean, int flags, int ctype = CV_64F);\n\n/** wrap PCA::operator() */\nCV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean,\n                             OutputArray eigenvectors, int maxComponents = 0);\n\n/** wrap PCA::operator() */\nCV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean,\n                             OutputArray eigenvectors, double retainedVariance);\n\n/** wrap PCA::project */\nCV_EXPORTS_W void PCAProject(InputArray data, InputArray mean,\n                             InputArray eigenvectors, OutputArray result);\n\n/** wrap PCA::backProject */\nCV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean,\n                                 InputArray eigenvectors, OutputArray result);\n\n/** wrap SVD::compute */\nCV_EXPORTS_W void SVDecomp( InputArray src, OutputArray w, OutputArray u, OutputArray vt, int flags = 0 );\n\n/** wrap SVD::backSubst */\nCV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt,\n                               InputArray rhs, OutputArray dst );\n\n/** @brief Calculates the Mahalanobis distance between two vectors.\n\nThe function Mahalanobis calculates and returns the weighted distance between two vectors:\n\\f[d( \\texttt{vec1} , \\texttt{vec2} )= \\sqrt{\\sum_{i,j}{\\texttt{icovar}(i,j)\\cdot(\\texttt{vec1}(i)-\\texttt{vec2}(i))\\cdot(\\texttt{vec1}(j)-\\texttt{vec2}(j))}}\\f]\nThe covariance matrix may be calculated using the cv::calcCovarMatrix function and then inverted using\nthe invert function (preferably using the cv::DECOMP_SVD method, as the most accurate).\n
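A minimal sketch, assuming an identity inverse covariance purely for illustration:\n@code{.cpp}\n    Mat v1 = (Mat_<float>(1,2) << 1, 0), v2 = (Mat_<float>(1,2) << 0, 1);\n    Mat icovar = Mat::eye(2, 2, CV_32F);    // identity reduces the metric to the Euclidean distance\n    double d = Mahalanobis(v1, v2, icovar); // sqrt(2) here\n@endcode\n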
@param v1 first 1D input vector.\n@param v2 second 1D input vector.\n@param icovar inverse covariance matrix.\n*/\nCV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar);\n\n/** @brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.\n\nThe function performs one of the following:\n-   The forward Fourier transform of a 1D vector of N elements:\n    \\f[Y = F^{(N)}  \\cdot X,\\f]\n    where \\f$F^{(N)}_{jk}=\\exp(-2\\pi i j k/N)\\f$ and \\f$i=\\sqrt{-1}\\f$\n-   The inverse Fourier transform of a 1D vector of N elements:\n    \\f[\\begin{array}{l} X'=  \\left (F^{(N)} \\right )^{-1}  \\cdot Y =  \\left (F^{(N)} \\right )^*  \\cdot Y  \\\\ X = (1/N)  \\cdot X', \\end{array}\\f]\n    where \\f$F^*=\\left(\\textrm{Re}(F^{(N)})-i \\cdot \\textrm{Im}(F^{(N)})\\right)^T\\f$\n-   The forward 2D Fourier transform of an M x N matrix:\n    \\f[Y = F^{(M)}  \\cdot X  \\cdot F^{(N)}\\f]\n-   The inverse 2D Fourier transform of an M x N matrix:\n    \\f[\\begin{array}{l} X'=  \\left (F^{(M)} \\right )^*  \\cdot Y  \\cdot \\left (F^{(N)} \\right )^* \\\\ X =  \\frac{1}{M \\cdot N} \\cdot X' \\end{array}\\f]\n\nIn case of real (single-channel) data, the output spectrum of the forward Fourier transform or input\nspectrum of the inverse Fourier transform can be represented in a packed format called *CCS*\n(complex-conjugate-symmetrical). It was borrowed from IPL (Intel\\* Image Processing Library). Here\nis how the 2D *CCS* spectrum looks:\n\\f[\\begin{bmatrix} Re Y_{0,0} & Re Y_{0,1} & Im Y_{0,1} & Re Y_{0,2} & Im Y_{0,2} &  \\cdots & Re Y_{0,N/2-1} & Im Y_{0,N/2-1} & Re Y_{0,N/2}  \\\\ Re Y_{1,0} & Re Y_{1,1} & Im Y_{1,1} & Re Y_{1,2} & Im Y_{1,2} &  \\cdots & Re Y_{1,N/2-1} & Im Y_{1,N/2-1} & Re Y_{1,N/2}  \\\\ Im Y_{1,0} & Re Y_{2,1} & Im Y_{2,1} & Re Y_{2,2} & Im Y_{2,2} &  \\cdots & Re Y_{2,N/2-1} & Im Y_{2,N/2-1} & Im Y_{1,N/2}  \\\\ \\hdotsfor{9} \\\\ Re Y_{M/2-1,0} &  Re Y_{M-3,1}  & Im Y_{M-3,1} &  \\hdotsfor{3} & Re Y_{M-3,N/2-1} & Im Y_{M-3,N/2-1}& Re Y_{M/2-1,N/2}  \\\\ Im Y_{M/2-1,0} &  Re Y_{M-2,1}  & Im Y_{M-2,1} &  \\hdotsfor{3} & Re Y_{M-2,N/2-1} & Im Y_{M-2,N/2-1}& Im Y_{M/2-1,N/2}  \\\\ Re Y_{M/2,0}  &  Re Y_{M-1,1} &  Im Y_{M-1,1} &  \\hdotsfor{3} & Re Y_{M-1,N/2-1} & Im Y_{M-1,N/2-1}& Re Y_{M/2,N/2} \\end{bmatrix}\\f]\n\nIn case of 1D transform of a real vector, the output looks like the first row of the matrix above.\n\nSo, the function chooses an operation mode depending on the flags and size of the input array:\n-   If DFT_ROWS is set or the input array has a single row or single column, the function\n    performs a 1D forward or inverse transform of each row of the matrix when DFT_ROWS is set.\n    Otherwise, it performs a 2D transform.\n-   If the input array is real and DFT_INVERSE is not set, the function performs a forward 1D or\n    2D transform:\n    -   When DFT_COMPLEX_OUTPUT is set, the output is a complex matrix of the same size as\n        input.\n    -   When DFT_COMPLEX_OUTPUT is not set, the output is a real matrix of the same size as\n        input. In case of 2D transform, it uses the packed format as shown above. In case of a\n        single 1D transform, it looks like the first row of the matrix above. In case of\n        multiple 1D transforms (when using the DFT_ROWS flag), each row of the output matrix\n        looks like the first row of the matrix above.\n-   If the input array is complex and either DFT_INVERSE or DFT_REAL_OUTPUT is not set, the\n    output is a complex array of the same size as input. The function performs a forward or\n    inverse 1D or 2D transform of the whole input array or each row of the input array\n    independently, depending on the flags DFT_INVERSE and DFT_ROWS.\n-   When DFT_INVERSE is set and the input array is real, or it is complex but DFT_REAL_OUTPUT\n    is set, the output is a real array of the same size as input. The function performs a 1D or 2D\n    inverse transformation of the whole input array or each individual row, depending on the flags\n    DFT_INVERSE and DFT_ROWS.\n\nIf DFT_SCALE is set, the scaling is done after the transformation.\n\nUnlike dct , the function supports arrays of arbitrary size. But only those arrays are processed\nefficiently whose sizes can be factorized into a product of small prime numbers (2, 3, and 5 in the\ncurrent implementation). 
Such an efficient DFT size can be calculated using the getOptimalDFTSize\nmethod.\n\nThe sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays:\n@code\n    void convolveDFT(InputArray A, InputArray B, OutputArray C)\n    {\n        // reallocate the output array if needed\n        C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());\n        Size dftSize;\n        // calculate the size of DFT transform\n        dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);\n        dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);\n\n        // allocate temporary buffers and initialize them with 0's\n        Mat tempA(dftSize, A.type(), Scalar::all(0));\n        Mat tempB(dftSize, B.type(), Scalar::all(0));\n\n        // copy A and B to the top-left corners of tempA and tempB, respectively\n        Mat roiA(tempA, Rect(0,0,A.cols,A.rows));\n        A.copyTo(roiA);\n        Mat roiB(tempB, Rect(0,0,B.cols,B.rows));\n        B.copyTo(roiB);\n\n        // now transform the padded A & B in-place;\n        // use \"nonzeroRows\" hint for faster processing\n        dft(tempA, tempA, 0, A.rows);\n        dft(tempB, tempB, 0, B.rows);\n\n        // multiply the spectrums;\n        // the function handles packed spectrum representations well\n        mulSpectrums(tempA, tempB, tempA, 0);\n\n        // transform the product back from the frequency domain.\n        // Even though all the result rows will be non-zero,\n        // you need only the first C.rows of them, and thus you\n        // pass nonzeroRows == C.rows\n        dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);\n\n        // now copy the result back to C.\n        tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);\n\n        // all the temporary buffers will be deallocated automatically\n    }\n@endcode\nTo optimize this sample, consider the following approaches:\n-   Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to\n    the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole\n    tempA and tempB. It is only necessary to clear the tempA.cols - A.cols ( tempB.cols - B.cols)\n    rightmost columns of the matrices.\n-   This DFT-based convolution does not have to be applied to the whole big arrays, especially if B\n    is significantly smaller than A or vice versa. Instead, you can calculate convolution by parts.\n    To do this, you need to split the output array C into multiple tiles. For each tile, estimate\n    which parts of A and B are required to calculate convolution in this tile. If the tiles in C are\n    too small, the speed will decrease a lot because of repeated work. In the extreme case, when\n    each tile in C is a single pixel, the algorithm becomes equivalent to the naive convolution\n    algorithm. If the tiles are too big, the temporary arrays tempA and tempB become too big and\n    there is also a slowdown because of bad cache locality. So, there is an optimal tile size\n    somewhere in the middle.\n-   If different tiles in C can be calculated in parallel and, thus, the convolution is done by\n    parts, the loop can be threaded.\n\nAll of the above improvements have been implemented in matchTemplate and filter2D . Therefore, by\nusing them, you can get even better performance than with the above theoretically optimal\nimplementation. 
Note, however, that those two functions actually calculate cross-correlation, not convolution,\nso you need to \"flip\" the second convolution operand B vertically and horizontally using flip .\n@note\n-   An example using the discrete Fourier transform can be found at\n    opencv_source_code/samples/cpp/dft.cpp\n-   (Python) An example using the dft functionality to perform Wiener deconvolution can be found\n    at opencv_source/samples/python/deconvolution.py\n-   (Python) An example rearranging the quadrants of a Fourier image can be found at\n    opencv_source/samples/python/dft.py\n@param src input array that could be real or complex.\n@param dst output array whose size and type depend on the flags.\n@param flags transformation flags, representing a combination of cv::DftFlags\n@param nonzeroRows when the parameter is not zero, the function assumes that only the first\nnonzeroRows rows of the input array (DFT_INVERSE is not set) or only the first nonzeroRows of the\noutput array (DFT_INVERSE is set) contain non-zeros, thus, the function can handle the rest of the\nrows more efficiently and save some time; this technique is very useful for calculating array\ncross-correlation or convolution using DFT.\n@sa dct , getOptimalDFTSize , mulSpectrums, filter2D , matchTemplate , flip , cartToPolar ,\nmagnitude , phase\n*/\nCV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0);\n\n/** @brief Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.\n\nidft(src, dst, flags) is equivalent to dft(src, dst, flags | DFT_INVERSE) .\n@note Neither dft nor idft scales the result by default. So, you should pass DFT_SCALE to one of\ndft or idft explicitly to make these transforms mutually inverse.\n
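A minimal sketch of a round trip, assuming a small illustrative vector:\n@code{.cpp}\n    Mat x = (Mat_<float>(1,4) << 1, 2, 3, 4), spectrum, recovered;\n    dft(x, spectrum, DFT_COMPLEX_OUTPUT);                   // forward transform\n    idft(spectrum, recovered, DFT_SCALE | DFT_REAL_OUTPUT); // DFT_SCALE makes the pair mutually inverse\n    // recovered equals x up to rounding error\n@endcode\n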
@sa dft, dct, idct, mulSpectrums, getOptimalDFTSize\n@param src input floating-point real or complex array.\n@param dst output array whose size and type depend on the flags.\n@param flags operation flags (see dft and cv::DftFlags).\n@param nonzeroRows number of dst rows to process; the rest of the rows have undefined content (see\nthe convolution sample in dft description).\n*/\nCV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0);\n\n/** @brief Performs a forward or inverse discrete Cosine transform of a 1D or 2D array.\n\nThe function dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D\nfloating-point array:\n-   The forward Cosine transform of a 1D vector of N elements:\n    \\f[Y = C^{(N)}  \\cdot X\\f]\n    where\n    \\f[C^{(N)}_{jk}= \\sqrt{\\alpha_j/N} \\cos \\left ( \\frac{\\pi(2k+1)j}{2N} \\right )\\f]\n    and\n    \\f$\\alpha_0=1\\f$, \\f$\\alpha_j=2\\f$ for *j \\> 0*.\n-   The inverse Cosine transform of a 1D vector of N elements:\n    \\f[X =  \\left (C^{(N)} \\right )^{-1}  \\cdot Y =  \\left (C^{(N)} \\right )^T  \\cdot Y\\f]\n    (since \\f$C^{(N)}\\f$ is an orthogonal matrix, \\f$C^{(N)} \\cdot \\left(C^{(N)}\\right)^T = I\\f$ )\n-   The forward 2D Cosine transform of an M x N matrix:\n    \\f[Y = C^{(M)}  \\cdot X  \\cdot \\left (C^{(N)} \\right )^T\\f]\n-   The inverse 2D Cosine transform of an M x N matrix:\n    \\f[X =  \\left (C^{(M)} \\right )^T  \\cdot Y  \\cdot C^{(N)}\\f]\n\nThe function chooses the mode of operation by looking at the flags and size of the input array:\n-   If (flags & DCT_INVERSE) == 0 , the function does a forward 1D or 2D transform. Otherwise, it\n    is an inverse 1D or 2D transform.\n-   If (flags & DCT_ROWS) != 0 , the function performs a 1D transform of each row.\n-   If the array is a single column or a single row, the function performs a 1D transform.\n-   If none of the above is true, the function performs a 2D transform.\n\n@note Currently dct supports even-size arrays (2, 4, 6 ...). For data analysis and approximation, you\ncan pad the array when necessary.\nAlso, the function's performance depends very much, and not monotonically, on the array size (see\ngetOptimalDFTSize ). In the current implementation DCT of a vector of size N is calculated via DFT\nof a vector of size N/2 . Thus, the optimal DCT size N1 \\>= N can be calculated as:\n@code\n    size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }\n    N1 = getOptimalDCTSize(N);\n@endcode\n@param src input floating-point array.\n@param dst output array of the same size and type as src .\n@param flags transformation flags as a combination of cv::DftFlags (DCT_*)\n@sa dft , getOptimalDFTSize , idct\n*/\nCV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags = 0);\n\n/** @brief Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.\n\nidct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE).\n
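A minimal sketch of the dct/idct round trip, assuming a small even-size vector:\n@code{.cpp}\n    Mat x = (Mat_<float>(1,4) << 1, 2, 3, 4), X, y;\n    dct(x, X);  // forward transform\n    idct(X, y); // inverse transform; y reproduces x without extra scaling\n@endcode\n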
@param src input floating-point single-channel array.\n@param dst output array of the same size and type as src.\n@param flags operation flags.\n@sa  dct, dft, idft, getOptimalDFTSize\n*/\nCV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags = 0);\n\n/** @brief Performs the per-element multiplication of two Fourier spectrums.\n\nThe function mulSpectrums performs the per-element multiplication of the two CCS-packed or complex\nmatrices that are results of a real or complex Fourier transform.\n\nThe function, together with dft and idft , may be used to calculate convolution (pass conjB=false )\nor correlation (pass conjB=true ) of two arrays rapidly. When the arrays are complex, they are\nsimply multiplied (per element) with an optional conjugation of the second-array elements. When the\narrays are real, they are assumed to be CCS-packed (see dft for details).\n@param a first input array.\n@param b second input array of the same size and type as a.\n@param c output array of the same size and type as a.\n@param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that\neach row of a and b is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply pass `0` as the value.\n@param conjB optional flag that conjugates the second input array before the multiplication (true)\nor not (false).\n*/\nCV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c,\n                               int flags, bool conjB = false);\n\n/** @brief Returns the optimal DFT size for a given vector size.\n\nDFT performance is not a monotonic function of a vector size. Therefore, when you calculate\nconvolution of two arrays or perform the spectral analysis of an array, it usually makes sense to\npad the input data with zeros to get a bit larger array that can be transformed much faster than the\noriginal one. Arrays whose size is a power-of-two (2, 4, 8, 16, 32, ...) are the fastest to process.\nHowever, arrays whose size is a product of 2's, 3's, and 5's (for example, 300 = 5\\*5\\*3\\*2\\*2)\nare also processed quite efficiently.\n\nThe function getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize\nso that the DFT of a vector of size N can be processed efficiently. In the current implementation N\n= 2 ^p^ \\* 3 ^q^ \\* 5 ^r^ for some integer p, q, r.\n\nThe function returns a negative number if vecsize is too large (very close to INT_MAX ).\n\nWhile the function cannot be used directly to estimate the optimal vector size for DCT transform\n(since the current DCT implementation supports only even-size vectors), it can be easily computed\nas getOptimalDFTSize((vecsize+1)/2)\\*2.\n@param vecsize vector size.\n@sa dft , dct , idft , idct , mulSpectrums\n*/\nCV_EXPORTS_W int getOptimalDFTSize(int vecsize);\n\n/** @brief Returns the default random number generator.\n\nThe function theRNG returns the default random number generator. For each thread, there is a\nseparate random number generator, so you can use the function safely in multi-thread environments.\nIf you just need to get a single random number using this generator or initialize an array, you can\nuse randu or randn instead. But if you are going to generate many random numbers inside a loop, it\nis much faster to use this function to retrieve the generator and then use RNG::operator _Tp() .\n@sa RNG, randu, randn\n*/\nCV_EXPORTS RNG& theRNG();\n\n/** @brief Generates a single uniformly-distributed random number or an array of random numbers.\n\nNon-template variant of the function fills the matrix dst with uniformly-distributed\nrandom numbers from the specified range:\n\\f[\\texttt{low} _c  \\leq \\texttt{dst} (I)_c <  \\texttt{high} _c\\f]\n@param dst output array of random numbers; the array must be pre-allocated.\n@param low inclusive lower boundary of the generated random numbers.\n@param high exclusive upper boundary of the generated random numbers.\n@sa RNG, randn, theRNG\n*/\nCV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high);\n\n/** @brief Fills the array with normally distributed random numbers.\n\nThe function randn fills the matrix dst with normally distributed random numbers with the specified\nmean vector and the standard deviation matrix. The generated random numbers are clipped to fit the\nvalue range of the output array data type.\n
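A minimal sketch of filling pre-allocated arrays, with illustrative parameters:\n@code{.cpp}\n    Mat u(1, 5, CV_32F), g(1, 5, CV_32F);\n    randu(u, Scalar(0), Scalar(1)); // uniform on [0, 1)\n    randn(g, Scalar(0), Scalar(1)); // Gaussian with mean 0 and standard deviation 1\n@endcode\n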
@param dst output array of random numbers; the array must be pre-allocated and have 1 to 4 channels.\n@param mean mean value (expectation) of the generated random numbers.\n@param stddev standard deviation of the generated random numbers; it can be either a vector (in\nwhich case a diagonal standard deviation matrix is assumed) or a square matrix.\n@sa RNG, randu\n*/\nCV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev);\n\n/** @brief Shuffles the array elements randomly.\n\nThe function randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and\nswapping them. The number of such swap operations will be dst.rows\\*dst.cols\\*iterFactor .\n@param dst input/output numerical 1D array.\n@param iterFactor scale factor that determines the number of random swap operations (see the details\nabove).\n@param rng optional random number generator used for shuffling; if it is zero, theRNG () is used\ninstead.\n@sa RNG, sort\n*/\nCV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* rng = 0);\n\n/** @brief Principal Component Analysis\n\nThe class is used to calculate a special basis for a set of vectors. The\nbasis will consist of eigenvectors of the covariance matrix calculated\nfrom the input set of vectors. The class %PCA can also transform\nvectors to/from the new coordinate space defined by the basis. Usually,\nin this new coordinate system, each vector from the original set (and\nany linear combination of such vectors) can be quite accurately\napproximated by taking its first few components, corresponding to the\neigenvectors of the largest eigenvalues of the covariance matrix.\nGeometrically it means that you calculate a projection of the vector to\na subspace formed by a few eigenvectors corresponding to the dominant\neigenvalues of the covariance matrix. And usually such a projection is\nvery close to the original vector. So, you can represent the original\nvector from a high-dimensional space with a much shorter vector\nconsisting of the projected vector's coordinates in the subspace. Such a\ntransformation is also known as Karhunen-Loeve Transform, or KLT.\nSee http://en.wikipedia.org/wiki/Principal_component_analysis\n\nThe sample below is a function that takes two matrices. The first\nmatrix stores a set of vectors (a row per vector) that is used to\ncalculate PCA. The second matrix stores another \"test\" set of vectors\n(a row per vector). First, these vectors are compressed with PCA, then\nreconstructed back, and then the reconstruction error norm is computed\nand printed for each vector:\n\n@code{.cpp}\nusing namespace cv;\n\nPCA compressPCA(const Mat& pcaset, int maxComponents,\n                const Mat& testset, Mat& compressed)\n{\n    PCA pca(pcaset, // pass the data\n            Mat(), // we do not have a pre-computed mean vector,\n                   // so let the PCA engine compute it\n            PCA::DATA_AS_ROW, // indicate that the vectors\n                                // are stored as matrix rows\n                                // (use PCA::DATA_AS_COL if the vectors are\n                                // the matrix columns)\n            maxComponents // specify how many principal components to retain\n            );\n    // if there is no test data, just return the computed basis, ready-to-use\n    if( !testset.data )\n        return pca;\n    CV_Assert( testset.cols == pcaset.cols );\n\n    compressed.create(testset.rows, maxComponents, testset.type());\n\n    for( int i = 0; i < testset.rows; i++ )\n    {\n        Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed;\n        // compress the vector, the result will be stored\n        // in the i-th row of the output matrix\n        pca.project(vec, coeffs);\n        // and then reconstruct it\n        pca.backProject(coeffs, reconstructed);\n        // and measure the error\n        printf(\"%d. 
diff = %g\\n\", i, norm(vec, reconstructed, NORM_L2));\n    }\n    return pca;\n}\n@endcode\n@sa calcCovarMatrix, mulTransposed, SVD, dft, dct\n*/\nclass CV_EXPORTS PCA\n{\npublic:\n    enum Flags { DATA_AS_ROW = 0, //!< indicates that the input samples are stored as matrix rows\n                 DATA_AS_COL = 1, //!< indicates that the input samples are stored as matrix columns\n                 USE_AVG     = 2  //!\n               };\n\n    /** @brief default constructor\n\n    The default constructor initializes an empty %PCA structure. The other\n    constructors initialize the structure and call PCA::operator()().\n    */\n    PCA();\n\n    /** @overload\n    @param data input samples stored as matrix rows or matrix columns.\n    @param mean optional mean value; if the matrix is empty (@c noArray()),\n    the mean is computed from the data.\n    @param flags operation flags; currently the parameter is only used to\n    specify the data layout (PCA::Flags)\n    @param maxComponents maximum number of components that %PCA should\n    retain; by default, all the components are retained.\n    */\n    PCA(InputArray data, InputArray mean, int flags, int maxComponents = 0);\n\n    /** @overload\n    @param data input samples stored as matrix rows or matrix columns.\n    @param mean optional mean value; if the matrix is empty (noArray()),\n    the mean is computed from the data.\n    @param flags operation flags; currently the parameter is only used to\n    specify the data layout (PCA::Flags)\n    @param retainedVariance Percentage of variance that PCA should retain.\n    Using this parameter will let the PCA decided how many components to\n    retain but it will always keep at least 2.\n    */\n    PCA(InputArray data, InputArray mean, int flags, double retainedVariance);\n\n    /** @brief performs %PCA\n\n    The operator performs %PCA of the supplied dataset. It is safe to reuse\n    the same PCA structure for multiple datasets. That is, if the structure\n    has been previously used with another dataset, the existing internal\n    data is reclaimed and the new eigenvalues, @ref eigenvectors , and @ref\n    mean are allocated and computed.\n\n    The computed eigenvalues are sorted from the largest to the smallest and\n    the corresponding eigenvectors are stored as eigenvectors rows.\n\n    @param data input samples stored as the matrix rows or as the matrix\n    columns.\n    @param mean optional mean value; if the matrix is empty (noArray()),\n    the mean is computed from the data.\n    @param flags operation flags; currently the parameter is only used to\n    specify the data layout. (Flags)\n    @param maxComponents maximum number of components that PCA should\n    retain; by default, all the components are retained.\n    */\n    PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents = 0);\n\n    /** @overload\n    @param data input samples stored as the matrix rows or as the matrix\n    columns.\n    @param mean optional mean value; if the matrix is empty (noArray()),\n    the mean is computed from the data.\n    @param flags operation flags; currently the parameter is only used to\n    specify the data layout. 
    specify the data layout (PCA::Flags).\n    @param retainedVariance Percentage of variance that %PCA should retain.\n    Using this parameter will let the %PCA decide how many components to\n    retain but it will always keep at least 2.\n     */\n    PCA& operator()(InputArray data, InputArray mean, int flags, double retainedVariance);\n\n    /** @brief Projects vector(s) to the principal component subspace.\n\n    The methods project one or more vectors to the principal component\n    subspace, where each vector projection is represented by coefficients in\n    the principal component basis. The first form of the method returns the\n    matrix that the second form writes to the result. So the first form can\n    be used as a part of an expression while the second form can be more\n    efficient in a processing loop.\n    @param vec input vector(s); must have the same dimensionality and the\n    same layout as the input data used at the %PCA phase, that is, if\n    DATA_AS_ROW is specified, then `vec.cols==data.cols`\n    (vector dimensionality) and `vec.rows` is the number of vectors to\n    project, and the same is true for the PCA::DATA_AS_COL case.\n    */\n    Mat project(InputArray vec) const;\n\n    /** @overload\n    @param vec input vector(s); must have the same dimensionality and the\n    same layout as the input data used at the PCA phase, that is, if\n    DATA_AS_ROW is specified, then `vec.cols==data.cols`\n    (vector dimensionality) and `vec.rows` is the number of vectors to\n    project, and the same is true for the PCA::DATA_AS_COL case.\n    @param result output vectors; in case of PCA::DATA_AS_COL, the\n    output matrix has as many columns as the number of input vectors, this\n    means that `result.cols==vec.cols` and the number of rows matches the\n    number of principal components (for example, `maxComponents` parameter\n    passed to the constructor).\n     */\n    void project(InputArray vec, OutputArray result) const;\n\n    /** @brief Reconstructs vectors from their PC projections.\n\n    The methods are inverse operations to PCA::project. They take PC\n    coordinates of projected vectors and reconstruct the original vectors.\n    Unless all the principal components have been retained, the\n    reconstructed vectors are different from the originals. But typically,\n    the difference is small if the number of components is large enough (but\n    still much smaller than the original vector dimensionality). 
This is\n    why PCA is often used to represent data compactly.\n    @param vec coordinates of the vectors in the principal component\n    subspace, the layout and size are the same as of PCA::project output\n    vectors.\n     */\n    Mat backProject(InputArray vec) const;\n\n    /** @overload\n    @param vec coordinates of the vectors in the principal component\n    subspace, the layout and size are the same as of PCA::project output\n    vectors.\n    @param result reconstructed vectors; the layout and size are the same as\n    of PCA::project input vectors.\n     */\n    void backProject(InputArray vec, OutputArray result) const;\n\n    /** @brief writes and loads the %PCA state to/from a file storage\n\n*/\n    void write(FileStorage& fs ) const;\n    void read(const FileNode& fs);\n\n    Mat eigenvectors; //!< eigenvectors of the covariance matrix\n    Mat eigenvalues; //!< eigenvalues of the covariance matrix\n    Mat mean; //!< mean value subtracted before the projection and added after the back projection\n};\n\n/** @example pca.cpp\n  An example using %PCA for dimensionality reduction while maintaining a given amount of variance\n */\n\n/**\n   @brief Linear Discriminant Analysis\n   @todo document this class\n
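\n   A minimal sketch, assuming two tiny illustrative classes:\n   @code{.cpp}\n       Mat samples = (Mat_<double>(4,2) << 0, 0,  0, 1,  4, 4,  4, 5);\n       Mat labels  = (Mat_<int>(4,1) << 0, 0, 1, 1);\n       LDA lda(samples, labels, 1);          // keep a single discriminant component\n       Mat projected = lda.project(samples); // 4x1 projection separating the two classes\n   @endcode\n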
 */\nclass CV_EXPORTS LDA\n{\npublic:\n    /** @brief constructor\n    Initializes an LDA with num_components (default 0).\n    */\n    explicit LDA(int num_components = 0);\n\n    /** Initializes and performs a Discriminant Analysis with Fisher's\n     Optimization Criterion on given data in src and corresponding labels\n     in labels. If the number of components given is 0 (or less), it is\n     automatically determined from the given data in the computation.\n    */\n    LDA(InputArrayOfArrays src, InputArray labels, int num_components = 0);\n\n    /** Serializes this object to a given filename.\n      */\n    void save(const String& filename) const;\n\n    /** Deserializes this object from a given filename.\n      */\n    void load(const String& filename);\n\n    /** Serializes this object to a given cv::FileStorage.\n      */\n    void save(FileStorage& fs) const;\n\n    /** Deserializes this object from a given cv::FileStorage.\n      */\n    void load(const FileStorage& node);\n\n    /** destructor\n      */\n    ~LDA();\n\n    /** Compute the discriminants for data in src (row aligned) and labels.\n      */\n    void compute(InputArrayOfArrays src, InputArray labels);\n\n    /** Projects samples into the LDA subspace.\n        src may be one or more row aligned samples.\n      */\n    Mat project(InputArray src);\n\n    /** Reconstructs projections from the LDA subspace.\n        src may be one or more row aligned projections.\n      */\n    Mat reconstruct(InputArray src);\n\n    /** Returns the eigenvectors of this LDA.\n      */\n    Mat eigenvectors() const { return _eigenvectors; }\n\n    /** Returns the eigenvalues of this LDA.\n      */\n    Mat eigenvalues() const { return _eigenvalues; }\n\n    static Mat subspaceProject(InputArray W, InputArray mean, InputArray src);\n    static Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);\n\nprotected:\n    bool _dataAsRow; // unused, but needed for 3.0 ABI compatibility.\n    int _num_components;\n    Mat _eigenvectors;\n    Mat _eigenvalues;\n    void lda(InputArrayOfArrays src, InputArray labels);\n};\n\n/** @brief Singular Value Decomposition\n\nClass for computing Singular Value Decomposition of a floating-point\nmatrix. The Singular Value Decomposition is used to solve least-squares\nproblems, under-determined linear systems, invert matrices, compute\ncondition numbers, and so on.\n\nIf you want to compute a condition number of a matrix or an absolute value of\nits determinant, you do not need `u` and `vt`. You can pass\nflags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that full-size u\nand vt must be computed, which is not necessary most of the time.\n\n@sa invert, solve, eigen, determinant\n*/\nclass CV_EXPORTS SVD\n{\npublic:\n    enum Flags {\n        /** allow the algorithm to modify the decomposed matrix; it can save space and speed up\n            processing. currently ignored. */\n        MODIFY_A = 1,\n        /** indicates that only a vector of singular values `w` is to be processed, while u and vt\n            will be set to empty matrices */\n        NO_UV    = 2,\n        /** when the matrix is not square, by default the algorithm produces u and vt matrices of\n            sufficiently large size for the further A reconstruction; if, however, FULL_UV flag is\n            specified, u and vt will be full-size square orthogonal matrices.*/\n        FULL_UV  = 4\n    };\n\n    /** @brief the default constructor\n\n    initializes an empty SVD structure\n      */\n    SVD();\n\n    /** @overload\n    initializes an empty SVD structure and then calls SVD::operator()\n    @param src decomposed matrix.\n    @param flags operation flags (SVD::Flags)\n      */\n    SVD( InputArray src, int flags = 0 );\n\n    /** @brief the operator that performs SVD. The previously allocated u, w and vt are released.\n\n    The operator performs the singular value decomposition of the supplied\n    matrix. The u, `vt` , and the vector of singular values w are stored in\n    the structure. The same SVD structure can be reused many times with\n    different matrices. Each time, if needed, the previous u, `vt` , and w\n    are reclaimed and the new matrices are created, which is all handled by\n    Mat::create.\n    @param src decomposed matrix.\n    @param flags operation flags (SVD::Flags)\n      */\n    SVD& operator ()( InputArray src, int flags = 0 );\n\n    /** @brief decomposes matrix and stores the results to user-provided matrices\n\n    The methods/functions perform SVD of a matrix. Unlike SVD::SVD constructor\n    and SVD::operator(), they store the results to the user-provided\n    matrices:\n\n    @code{.cpp}\n    Mat A, w, u, vt;\n    SVD::compute(A, w, u, vt);\n    @endcode\n\n    @param src decomposed matrix\n    @param w calculated singular values\n    @param u calculated left singular vectors\n    @param vt transposed matrix of right singular vectors\n    @param flags operation flags - see SVD::SVD.\n      */\n    static void compute( InputArray src, OutputArray w,\n                         OutputArray u, OutputArray vt, int flags = 0 );\n\n    /** @overload\n    computes singular values of a matrix\n    @param src decomposed matrix\n    @param w calculated singular values\n    @param flags operation flags - see SVD::Flags.\n      */\n    static void compute( InputArray src, OutputArray w, int flags = 0 );\n\n    /** @brief performs back substitution\n      */\n    static void backSubst( InputArray w, InputArray u,\n                           InputArray vt, InputArray rhs,\n                           OutputArray dst );\n\n    /** @brief solves an under-determined singular linear system\n\n    The method finds a unit-length solution x of a singular linear system\n    A\\*x = 0. Depending on the rank of A, there can be no solutions, a\n    single solution or an infinite number of solutions. In general, the\n    algorithm solves the following problem:\n    \\f[dst =  \\arg \\min _{x:  \\| x \\| =1}  \\| src  \\cdot x  \\|\\f]\n
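\n    A minimal sketch, assuming an obviously rank-deficient matrix:\n    @code{.cpp}\n    Mat A = (Mat_<double>(2,3) << 1, 0, 0,  0, 1, 0), x;\n    SVD::solveZ(A, x); // x is a unit vector satisfying A*x = 0, here (0, 0, +-1)\n    @endcode\n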
    @param src left-hand-side matrix.\n    @param dst found solution.\n      */\n    static void solveZ( InputArray src, OutputArray dst );\n\n    /** @brief performs a singular value back substitution.\n\n    The method calculates a back substitution for the specified right-hand\n    side:\n\n    \\f[\\texttt{x} =  \\texttt{vt} ^T  \\cdot diag( \\texttt{w} )^{-1}  \\cdot \\texttt{u} ^T  \\cdot \\texttt{rhs} \\sim \\texttt{A} ^{-1}  \\cdot \\texttt{rhs}\\f]\n\n    Using this technique you can either get a very accurate solution of a\n    consistent linear system, or the best (in the least-squares terms)\n    pseudo-solution of an overdetermined linear system.\n\n    @param rhs right-hand side of a linear system (u\\*w\\*v')\\*dst = rhs to\n    be solved, where A has been previously decomposed.\n\n    @param dst found solution of the system.\n\n    @note Explicit SVD with the further back substitution only makes sense\n    if you need to solve many linear systems with the same left-hand side\n    (for example, src ). If all you need is to solve a single system\n    (possibly with multiple rhs immediately available), simply call solve\n    and pass DECOMP_SVD there. It does absolutely the same thing.\n      */\n    void backSubst( InputArray rhs, OutputArray dst ) const;\n\n    /** @todo document */\n    template<typename _Tp, int m, int n, int nm> static\n    void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt );\n\n    /** @todo document */\n    template<typename _Tp, int m, int n, int nm> static\n    void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w );\n\n    /** @todo document */\n    template<typename _Tp, int m, int n, int nm, int nb> static\n    void backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst );\n\n    Mat u, w, vt;\n};\n\n/** @brief Random Number Generator\n\nRandom number generator. It encapsulates the state (currently, a 64-bit\ninteger) and has methods to return scalar random values and to fill\narrays with random values. Currently it supports uniform and Gaussian\n(normal) distributions. The generator uses the Multiply-With-Carry\nalgorithm, introduced by G. Marsaglia (\n<http://en.wikipedia.org/wiki/Multiply-with-carry> ).\nGaussian-distribution random numbers are generated using the Ziggurat\nalgorithm ( <http://en.wikipedia.org/wiki/Ziggurat_algorithm> ),\nintroduced by G. Marsaglia and W. W. Tsang.\n*/\nclass CV_EXPORTS RNG\n{\npublic:\n    enum { UNIFORM = 0,\n           NORMAL  = 1\n         };\n\n    /** @brief constructor\n\n    These are the RNG constructors. The first form sets the state to some\n    pre-defined value, equal to 2\\*\\*32-1 in the current implementation. The\n    second form sets the state to the specified value. 
If you pass state=0,\n    the constructor uses the above default value instead to avoid the\n    singular random number sequence, consisting of all zeros.\n    */\n    RNG();\n    /** @overload\n    @param state 64-bit value used to initialize the RNG.\n    */\n    RNG(uint64 state);\n    /**The method updates the state using the MWC algorithm and returns the\n    next 32-bit random number.*/\n    unsigned next();\n\n    /**Each of the methods updates the state using the MWC algorithm and\n    returns the next random number of the specified type. In case of integer\n    types, the returned number is from the available value range for the\n    specified type. In case of floating-point types, the returned value is\n    from [0,1) range.\n    */\n    operator uchar();\n    /** @overload */\n    operator schar();\n    /** @overload */\n    operator ushort();\n    /** @overload */\n    operator short();\n    /** @overload */\n    operator unsigned();\n    /** @overload */\n    operator int();\n    /** @overload */\n    operator float();\n    /** @overload */\n    operator double();\n\n    /** @brief returns a random integer sampled uniformly from [0, N).\n\n    The methods transform the state using the MWC algorithm and return the\n    next random number. The first form is equivalent to RNG::next . The\n    second form returns the random number modulo N , which means that the\n    result is in the range [0, N) .\n    */\n    unsigned operator ()();\n    /** @overload\n    @param N upper non-inclusive boundary of the returned random number.\n    */\n    unsigned operator ()(unsigned N);\n\n    /** @brief returns uniformly distributed integer random number from [a,b) range\n\n    The methods transform the state using the MWC algorithm and return the\n    next uniformly-distributed random number of the specified type, deduced\n    from the input parameter type, from the range [a, b) . There is a nuance\n    illustrated by the following sample:\n\n    @code{.cpp}\n    RNG rng;\n\n    // always produces 0\n    double a = rng.uniform(0, 1);\n\n    // produces double from [0, 1)\n    double a1 = rng.uniform((double)0, (double)1);\n\n    // produces float from [0, 1)\n    double b = rng.uniform(0.f, 1.f);\n\n    // produces double from [0, 1)\n    double c = rng.uniform(0., 1.);\n\n    // may cause compiler error because of ambiguity:\n    //  RNG::uniform(0, (int)0.999999)? or RNG::uniform((double)0, 0.99999)?\n    double d = rng.uniform(0, 0.999999);\n    @endcode\n\n    The compiler does not take into account the type of the variable to\n    which you assign the result of RNG::uniform . The only thing that\n    matters to the compiler is the type of a and b parameters. 
So, if you\n    want a floating-point random number, but the range boundaries are\n    integer numbers, either put dots at the end, if they are constants, or\n    use explicit type cast operators, as in the a1 initialization above.\n    @param a lower inclusive boundary of the returned random numbers.\n    @param b upper non-inclusive boundary of the returned random numbers.\n      */\n    int uniform(int a, int b);\n    /** @overload */\n    float uniform(float a, float b);\n    /** @overload */\n    double uniform(double a, double b);\n\n    /** @brief Fills arrays with random numbers.\n\n    @param mat 2D or N-dimensional matrix; currently matrices with more than\n    4 channels are not supported by the methods, use Mat::reshape as a\n    possible workaround.\n    @param distType distribution type, RNG::UNIFORM or RNG::NORMAL.\n    @param a first distribution parameter; in case of the uniform\n    distribution, this is an inclusive lower boundary, in case of the normal\n    distribution, this is a mean value.\n    @param b second distribution parameter; in case of the uniform\n    distribution, this is a non-inclusive upper boundary, in case of the\n    normal distribution, this is a standard deviation (diagonal of the\n    standard deviation matrix or the full standard deviation matrix).\n    @param saturateRange pre-saturation flag; for uniform distribution only;\n    if true, the method will first convert a and b to the acceptable value\n    range (according to the mat datatype) and then will generate uniformly\n    distributed random numbers within the range [saturate(a), saturate(b)),\n    if saturateRange=false, the method will generate uniformly distributed\n    random numbers in the original range [a, b) and then will saturate them,\n    it means, for example, that\n    <tt>theRNG().fill(mat_8u, RNG::UNIFORM, -DBL_MAX, DBL_MAX)</tt> will likely\n    produce an array mostly filled with 0's and 255's, since the range [0, 255]\n    is significantly smaller than [-DBL_MAX, DBL_MAX).\n\n    Each of the methods fills the matrix with the random values from the\n    specified distribution. As the new numbers are generated, the RNG state\n    is updated accordingly. In case of multiple-channel images, every\n    channel is filled independently, which means that RNG cannot generate\n    samples from the multi-dimensional Gaussian distribution with\n    non-diagonal covariance matrix directly. To do that, the method\n    generates samples from multi-dimensional standard Gaussian distribution\n    with zero mean and identity covariance matrix, and then transforms them\n    using transform to get samples from the specified Gaussian distribution.\n    */\n    void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange = false );\n\n    /** @brief Returns the next random number sampled from the Gaussian distribution\n    @param sigma standard deviation of the distribution.\n\n    The method transforms the state using the MWC algorithm and returns the\n    next random number from the Gaussian distribution N(0, sigma). That is,\n    the mean value of the returned random numbers is zero and the standard\n    deviation is the specified sigma.\n
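\n    A minimal sketch, with an illustrative sigma:\n    @code{.cpp}\n    RNG& rng = theRNG();\n    double sample = rng.gaussian(2.0); // one draw from N(0, 2)\n    @endcode\n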
    */\n    double gaussian(double sigma);\n\n    uint64 state;\n};\n\n/** @brief Mersenne Twister random number generator\n\nInspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c\n@todo document\n */\nclass CV_EXPORTS RNG_MT19937\n{\npublic:\n    RNG_MT19937();\n    RNG_MT19937(unsigned s);\n    void seed(unsigned s);\n\n    unsigned next();\n\n    operator int();\n    operator unsigned();\n    operator float();\n    operator double();\n\n    unsigned operator ()(unsigned N);\n    unsigned operator ()();\n\n    /** @brief returns uniformly distributed integer random number from [a,b) range\n\n*/\n    int uniform(int a, int b);\n    /** @brief returns uniformly distributed floating-point random number from [a,b) range\n\n*/\n    float uniform(float a, float b);\n    /** @brief returns uniformly distributed double-precision floating-point random number from [a,b) range\n\n*/\n    double uniform(double a, double b);\n\nprivate:\n    enum PeriodParameters {N = 624, M = 397};\n    unsigned state[N];\n    int mti;\n};\n\n//! @} core_array\n\n//! @addtogroup core_cluster\n//!  @{\n\n/** @example kmeans.cpp\n  An example on K-means clustering\n*/\n\n/** @brief Finds centers of clusters and groups input samples around the clusters.\n\nThe function kmeans implements a k-means algorithm that finds the centers of K clusters\nand groups the input samples around the clusters. As an output, \\f$\\texttt{labels}_i\\f$ contains a\n0-based cluster index for the sample stored in the \\f$i^{th}\\f$ row of the samples matrix.\n\n@note\n-   (Python) An example on K-means clustering can be found at\n    opencv_source_code/samples/python/kmeans.py\n@param data Data for clustering. An array of N-dimensional points with float coordinates is needed.\nExamples of this array can be:\n-   Mat points(count, 2, CV_32F);\n-   Mat points(count, 1, CV_32FC2);\n-   Mat points(1, count, CV_32FC2);\n-   std::vector\\<cv::Point2f\\> points(sampleCount);\n@param K Number of clusters to split the set by.\n@param bestLabels Input/output integer array that stores the cluster indices for every sample.\n@param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or\nthe desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster\ncenters moves by less than criteria.epsilon on some iteration, the algorithm stops.\n@param attempts The number of times the algorithm is executed using different\ninitial labellings. The algorithm returns the labels that yield the best compactness (see the last\nfunction parameter).\n@param flags Flag that can take values of cv::KmeansFlags\n@param centers Output matrix of the cluster centers, one row per cluster center.\n@return The function returns the compactness measure that is computed as\n\\f[\\sum _i  \\| \\texttt{samples} _i -  \\texttt{centers} _{ \\texttt{labels} _i} \\| ^2\\f]\nafter every attempt. The best (minimum) value is chosen and the corresponding labels and the\ncompactness value are returned by the function. Basically, you can use only the core of the\nfunction, set the number of attempts to 1, initialize labels each time using a custom algorithm,\npass them with the ( flags = KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best\n(most-compact) clustering.\n
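\nA minimal sketch, assuming randomly generated illustrative points:\n@code{.cpp}\n    Mat points(10, 1, CV_32FC2), labels, centers;\n    randu(points, Scalar::all(0), Scalar::all(100));\n    kmeans(points, 2, labels,\n           TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),\n           3, KMEANS_PP_CENTERS, centers); // labels is 10x1 CV_32S; centers holds the two cluster centers\n@endcode\n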
Basically, you can use only the core of the\nfunction: set the number of attempts to 1, initialize labels each time using a custom algorithm,\npass them with the KMEANS_USE_INITIAL_LABELS flag, and then choose the best\n(most compact) clustering.\n*/\nCV_EXPORTS_W double kmeans( InputArray data, int K, InputOutputArray bestLabels,\n                            TermCriteria criteria, int attempts,\n                            int flags, OutputArray centers = noArray() );\n\n//! @} core_cluster\n\n//! @addtogroup core_basic\n//! @{\n\n/////////////////////////////// Formatted output of cv::Mat ///////////////////////////\n\n/** @todo document */\nclass CV_EXPORTS Formatted\n{\npublic:\n    virtual const char* next() = 0;\n    virtual void reset() = 0;\n    virtual ~Formatted();\n};\n\n/** @todo document */\nclass CV_EXPORTS Formatter\n{\npublic:\n    enum { FMT_DEFAULT = 0,\n           FMT_MATLAB  = 1,\n           FMT_CSV     = 2,\n           FMT_PYTHON  = 3,\n           FMT_NUMPY   = 4,\n           FMT_C       = 5\n         };\n\n    virtual ~Formatter();\n\n    virtual Ptr<Formatted> format(const Mat& mtx) const = 0;\n\n    virtual void set32fPrecision(int p = 8) = 0;\n    virtual void set64fPrecision(int p = 16) = 0;\n    virtual void setMultiline(bool ml = true) = 0;\n\n    static Ptr<Formatter> get(int fmt = FMT_DEFAULT);\n\n};\n\nstatic inline\nString& operator << (String& out, Ptr<Formatted> fmtd)\n{\n    fmtd->reset();\n    for(const char* str = fmtd->next(); str; str = fmtd->next())\n        out += cv::String(str);\n    return out;\n}\n\nstatic inline\nString& operator << (String& out, const Mat& mtx)\n{\n    return out << Formatter::get()->format(mtx);\n}\n\n//////////////////////////////////////// Algorithm ////////////////////////////////////\n\nclass CV_EXPORTS Algorithm;\n\ntemplate<typename _Tp> struct ParamType {};\n\n\n/** @brief This is a base class for all more or less complex algorithms in OpenCV,\n\nespecially for classes of algorithms for which there can be multiple implementations. 
Examples\nare stereo correspondence (for which there are algorithms like block matching, semi-global block\nmatching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians\nmodels, codebook-based algorithms etc.), and optical flow (block matching, Lucas-Kanade, Horn-Schunck\netc.).\n\nHere is an example of using SIFT in your application via the Algorithm interface:\n@code\n    #include \"opencv2/opencv.hpp\"\n    #include \"opencv2/xfeatures2d.hpp\"\n    using namespace cv::xfeatures2d;\n\n    Ptr<Feature2D> sift = SIFT::create();\n    FileStorage fs(\"sift_params.xml\", FileStorage::READ);\n    if( fs.isOpened() ) // if we have a file with parameters, read them\n    {\n        sift->read(fs[\"sift_params\"]);\n        fs.release();\n    }\n    else // else modify the parameters and store them; the user can later edit the file to use different parameters\n    {\n        sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value\n        {\n            WriteStructContext ws(fs, \"sift_params\", CV_NODE_MAP);\n            sift->write(fs);\n        }\n    }\n    Mat image = imread(\"myimage.png\", 0), descriptors;\n    vector<KeyPoint> keypoints;\n    sift->detectAndCompute(image, noArray(), keypoints, descriptors);\n@endcode\n */\nclass CV_EXPORTS_W Algorithm\n{\npublic:\n    Algorithm();\n    virtual ~Algorithm();\n\n    /** @brief Clears the algorithm state\n    */\n    CV_WRAP virtual void clear() {}\n\n    /** @brief Stores algorithm parameters in a file storage\n    */\n    virtual void write(FileStorage& fs) const { (void)fs; }\n\n    /** @brief Reads algorithm parameters from a file storage\n    */\n    virtual void read(const FileNode& fn) { (void)fn; }\n\n    /** @brief Returns true if the Algorithm is empty (e.g. in the very beginning or after unsuccessful read)\n     */\n    virtual bool empty() const { return false; }\n\n    /** @brief Reads algorithm from the file node\n\n     This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):\n     @code\n     Ptr<SVM> svm = Algorithm::read<SVM>(fn);\n     @endcode\n     In order to make this method work, the derived class must override Algorithm::read(const\n     FileNode& fn) and also have a static create() method without parameters\n     (or with all parameters optional).\n     */\n    template<typename _Tp> static Ptr<_Tp> read(const FileNode& fn)\n    {\n        Ptr<_Tp> obj = _Tp::create();\n        obj->read(fn);\n        return !obj->empty() ? obj : Ptr<_Tp>();\n    }\n\n    /** @brief Loads algorithm from the file\n\n     @param filename Name of the file to read.\n     @param objname The optional name of the node to read (if empty, the first top-level node will be used)\n\n     This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):\n     @code\n     Ptr<SVM> svm = Algorithm::load<SVM>(\"my_svm_model.xml\");\n     @endcode\n     In order to make this method work, the derived class must override Algorithm::read(const\n     FileNode& fn).\n     */\n    template<typename _Tp> static Ptr<_Tp> load(const String& filename, const String& objname=String())\n    {\n        FileStorage fs(filename, FileStorage::READ);\n        FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname];\n        Ptr<_Tp> obj = _Tp::create();\n        obj->read(fn);\n        return !obj->empty() ? 
obj : Ptr<_Tp>();\n    }\n\n    /** @brief Loads algorithm from a String\n\n     @param strModel The string variable containing the model you want to load.\n     @param objname The optional name of the node to read (if empty, the first top-level node will be used)\n\n     This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):\n     @code\n     Ptr<SVM> svm = Algorithm::loadFromString<SVM>(myStringModel);\n     @endcode\n     */\n    template<typename _Tp> static Ptr<_Tp> loadFromString(const String& strModel, const String& objname=String())\n    {\n        FileStorage fs(strModel, FileStorage::READ + FileStorage::MEMORY);\n        FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname];\n        Ptr<_Tp> obj = _Tp::create();\n        obj->read(fn);\n        return !obj->empty() ? obj : Ptr<_Tp>();\n    }\n\n    /** Saves the algorithm to a file.\n     In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */\n    CV_WRAP virtual void save(const String& filename) const;\n\n    /** Returns the algorithm string identifier.\n     This string is used as the top-level xml/yml node tag when the object is saved to a file or string. */\n    CV_WRAP virtual String getDefaultName() const;\n};\n\nstruct Param {\n    enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,\n           UNSIGNED_INT=8, UINT64=9, UCHAR=11 };\n};\n\n\n\ntemplate<> struct ParamType<bool>\n{\n    typedef bool const_param_type;\n    typedef bool member_type;\n\n    enum { type = Param::BOOLEAN };\n};\n\ntemplate<> struct ParamType<int>\n{\n    typedef int const_param_type;\n    typedef int member_type;\n\n    enum { type = Param::INT };\n};\n\ntemplate<> struct ParamType<double>\n{\n    typedef double const_param_type;\n    typedef double member_type;\n\n    enum { type = Param::REAL };\n};\n\ntemplate<> struct ParamType<String>\n{\n    typedef const String& const_param_type;\n    typedef String member_type;\n\n    enum { type = Param::STRING };\n};\n\ntemplate<> struct ParamType<Mat>\n{\n    typedef const Mat& const_param_type;\n    typedef Mat member_type;\n\n    enum { type = Param::MAT };\n};\n\ntemplate<> struct ParamType<std::vector<Mat> >\n{\n    typedef const std::vector<Mat>& const_param_type;\n    typedef std::vector<Mat> member_type;\n\n    enum { type = Param::MAT_VECTOR };\n};\n\ntemplate<> struct ParamType<Algorithm>\n{\n    typedef const Ptr<Algorithm>& const_param_type;\n    typedef Ptr<Algorithm> member_type;\n\n    enum { type = Param::ALGORITHM };\n};\n\ntemplate<> struct ParamType<float>\n{\n    typedef float const_param_type;\n    typedef float member_type;\n\n    enum { type = Param::FLOAT };\n};\n\ntemplate<> struct ParamType<unsigned>\n{\n    typedef unsigned const_param_type;\n    typedef unsigned member_type;\n\n    enum { type = Param::UNSIGNED_INT };\n};\n\ntemplate<> struct ParamType<uint64>\n{\n    typedef uint64 const_param_type;\n    typedef uint64 member_type;\n\n    enum { type = Param::UINT64 };\n};\n\ntemplate<> struct ParamType<uchar>\n{\n    typedef uchar const_param_type;\n    typedef uchar member_type;\n\n    enum { type = Param::UCHAR };\n};\n\n//! @} core_basic\n\n} //namespace cv\n\n#include \"opencv2/core/operations.hpp\"\n#include \"opencv2/core/cvstd.inl.hpp\"\n#include \"opencv2/core/utility.hpp\"\n#include \"opencv2/core/optim.hpp\"\n\n#endif /*__OPENCV_CORE_HPP__*/\n"
  },
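  {
    "path": "docs/examples/rng_usage_sketch.cpp",
    "content": "// Illustrative sketch only -- not part of OpenCV or of this project's sources.\n// It exercises the cv::RNG / cv::RNG_MT19937 methods documented in core.hpp\n// (uniform, fill, gaussian); the file name and all values are hypothetical.\n#include <opencv2/core.hpp>\n#include <iostream>\n\nint main()\n{\n    cv::RNG rng(0x12345);\n\n    // uniform(): the overload is selected by the argument types, so pass\n    // floating-point bounds when a floating-point result is wanted.\n    int    i = rng.uniform(0, 10);  // integer in [0, 10)\n    double d = rng.uniform(0., 1.); // double in [0, 1)\n\n    // fill(): populate a matrix from a uniform or normal distribution.\n    cv::Mat m(4, 4, CV_32F);\n    rng.fill(m, cv::RNG::UNIFORM, cv::Scalar(0), cv::Scalar(1)); // uniform in [0, 1)\n    rng.fill(m, cv::RNG::NORMAL, cv::Scalar(0), cv::Scalar(1));  // mean 0, stddev 1\n\n    // gaussian(): one sample from N(0, sigma).\n    double g = rng.gaussian(2.0);\n\n    // The Mersenne Twister generator offers the same uniform() interface.\n    cv::RNG_MT19937 mt(42);\n    int j = mt.uniform(0, 10);\n\n    std::cout << i << \" \" << d << \" \" << g << \" \" << j << std::endl;\n    return 0;\n}\n"
  },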
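  {
    "path": "docs/examples/kmeans_usage_sketch.cpp",
    "content": "// Illustrative sketch only -- not part of OpenCV or of this project's sources.\n// It shows the cv::kmeans call documented in core.hpp on random 2-D points;\n// the point count, K, and the termination criteria are arbitrary choices.\n#include <opencv2/core.hpp>\n#include <iostream>\n\nint main()\n{\n    const int sampleCount = 100, K = 3;\n    cv::Mat points(sampleCount, 1, CV_32FC2), labels, centers;\n    cv::theRNG().fill(points, cv::RNG::UNIFORM, cv::Scalar(0, 0), cv::Scalar(100, 100));\n\n    // Run k-means 3 times with different initial labellings and keep the most\n    // compact result; centers receives one row per cluster center.\n    double compactness = cv::kmeans(points, K, labels,\n        cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),\n        3, cv::KMEANS_PP_CENTERS, centers);\n\n    std::cout << \"compactness: \" << compactness << std::endl;\n    return 0;\n}\n"
  },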
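  {
    "path": "docs/examples/mat_format_sketch.cpp",
    "content": "// Illustrative sketch only -- not part of OpenCV or of this project's sources.\n// It drives the Formatted/Formatter interfaces declared in core.hpp to print\n// a small matrix in NumPy style, mirroring the operator<< helpers there.\n#include <opencv2/core.hpp>\n#include <iostream>\n\nint main()\n{\n    cv::Mat m = (cv::Mat_<float>(2, 2) << 1, 2, 3, 4);\n\n    // format() returns a Formatted object that yields the text in chunks\n    // via next() until it returns NULL.\n    cv::Ptr<cv::Formatted> fmtd = cv::Formatter::get(cv::Formatter::FMT_NUMPY)->format(m);\n    for (const char* s = fmtd->next(); s; s = fmtd->next())\n        std::cout << s;\n    std::cout << std::endl;\n    return 0;\n}\n"
  },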
  {
    "path": "src/3rdparty/opencv/include/opencv2/cvconfig.h",
    "content": "/* OpenCV compiled as static or dynamic libs */\n#define BUILD_SHARED_LIBS\n\n/* Compile for 'real' NVIDIA GPU architectures */\n#define CUDA_ARCH_BIN \"\"\n\n/* Create PTX or BIN for 1.0 compute capability */\n/* #undef CUDA_ARCH_BIN_OR_PTX_10 */\n\n/* NVIDIA GPU features are used */\n#define CUDA_ARCH_FEATURES \"\"\n\n/* Compile for 'virtual' NVIDIA PTX architectures */\n#define CUDA_ARCH_PTX \"\"\n\n/* AVFoundation video libraries */\n/* #undef HAVE_AVFOUNDATION */\n\n/* V4L capturing support */\n/* #undef HAVE_CAMV4L */\n\n/* V4L2 capturing support */\n/* #undef HAVE_CAMV4L2 */\n\n/* Carbon windowing environment */\n/* #undef HAVE_CARBON */\n\n/* AMD's Basic Linear Algebra Subprograms Library*/\n/* #undef HAVE_CLAMDBLAS */\n\n/* AMD's OpenCL Fast Fourier Transform Library*/\n/* #undef HAVE_CLAMDFFT */\n\n/* Clp support */\n/* #undef HAVE_CLP */\n\n/* Cocoa API */\n/* #undef HAVE_COCOA */\n\n/* C= */\n/* #undef HAVE_CSTRIPES */\n\n/* NVidia Cuda Basic Linear Algebra Subprograms (BLAS) API*/\n/* #undef HAVE_CUBLAS */\n\n/* NVidia Cuda Runtime API*/\n/* #undef HAVE_CUDA */\n\n/* NVidia Cuda Fast Fourier Transform (FFT) API*/\n/* #undef HAVE_CUFFT */\n\n/* IEEE1394 capturing support */\n/* #undef HAVE_DC1394 */\n\n/* IEEE1394 capturing support - libdc1394 v2.x */\n/* #undef HAVE_DC1394_2 */\n\n/* DirectX */\n#define HAVE_DIRECTX\n/* #undef HAVE_DIRECTX_NV12 */\n#define HAVE_D3D11\n#define HAVE_D3D10\n#define HAVE_D3D9\n\n/* DirectShow Video Capture library */\n#define HAVE_DSHOW\n\n/* Eigen Matrix & Linear Algebra Library */\n/* #undef HAVE_EIGEN */\n\n/* FFMpeg video library */\n#define HAVE_FFMPEG\n\n/* ffmpeg's libswscale */\n#define HAVE_FFMPEG_SWSCALE\n\n/* ffmpeg in Gentoo */\n#define HAVE_GENTOO_FFMPEG\n\n/* Geospatial Data Abstraction Library */\n/* #undef HAVE_GDAL */\n\n/* GStreamer multimedia framework */\n/* #undef HAVE_GSTREAMER */\n\n/* GTK+ 2.0 Thread support */\n/* #undef HAVE_GTHREAD */\n\n/* GTK+ 2.x toolkit */\n/* #undef HAVE_GTK */\n\n/* Define to 1 if you have the <inttypes.h> header file. 
*/\n/* #undef HAVE_INTTYPES_H */\n\n/* Intel Perceptual Computing SDK library */\n/* #undef HAVE_INTELPERC */\n\n/* Intel Integrated Performance Primitives */\n#define HAVE_IPP\n#define HAVE_IPP_ICV_ONLY\n\n/* Intel IPP Async */\n/* #undef HAVE_IPP_A */\n\n/* JPEG-2000 codec */\n#define HAVE_JASPER\n\n/* IJG JPEG codec */\n#define HAVE_JPEG\n\n/* libpng/png.h needs to be included */\n/* #undef HAVE_LIBPNG_PNG_H */\n\n/* V4L/V4L2 capturing support via libv4l */\n/* #undef HAVE_LIBV4L */\n\n/* Microsoft Media Foundation Capture library */\n/* #undef HAVE_MSMF */\n\n/* NVidia Video Decoding API*/\n/* #undef HAVE_NVCUVID */\n\n/* OpenCL Support */\n#define HAVE_OPENCL\n/* #undef HAVE_OPENCL_STATIC */\n/* #undef HAVE_OPENCL_SVM */\n\n/* OpenEXR codec */\n#define HAVE_OPENEXR\n\n/* OpenGL support*/\n/* #undef HAVE_OPENGL */\n\n/* OpenNI library */\n/* #undef HAVE_OPENNI */\n\n/* OpenNI library */\n/* #undef HAVE_OPENNI2 */\n\n/* PNG codec */\n#define HAVE_PNG\n\n/* Posix threads (pthreads) */\n/* #undef HAVE_PTHREADS */\n\n/* parallel_for with pthreads */\n/* #undef HAVE_PTHREADS_PF */\n\n/* Qt support */\n/* #undef HAVE_QT */\n\n/* Qt OpenGL support */\n/* #undef HAVE_QT_OPENGL */\n\n/* QuickTime video libraries */\n/* #undef HAVE_QUICKTIME */\n\n/* QTKit video libraries */\n/* #undef HAVE_QTKIT */\n\n/* Intel Threading Building Blocks */\n/* #undef HAVE_TBB */\n\n/* TIFF codec */\n#define HAVE_TIFF\n\n/* Unicap video capture library */\n/* #undef HAVE_UNICAP */\n\n/* Video for Windows support */\n#define HAVE_VFW\n\n/* V4L2 capturing support in videoio.h */\n/* #undef HAVE_VIDEOIO */\n\n/* Win32 UI */\n#define HAVE_WIN32UI\n\n/* XIMEA camera support */\n/* #undef HAVE_XIMEA */\n\n/* Xine video library */\n/* #undef HAVE_XINE */\n\n/* Define if your processor stores words with the most significant byte\n   first (like Motorola and SPARC, unlike Intel and VAX). */\n/* #undef WORDS_BIGENDIAN */\n\n/* gPhoto2 library */\n/* #undef HAVE_GPHOTO2 */\n\n/* VA library (libva) */\n/* #undef HAVE_VA */\n\n/* Intel VA-API/OpenCL */\n/* #undef HAVE_VA_INTEL */\n"
  },
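  {
    "path": "docs/examples/cvconfig_features_sketch.cpp",
    "content": "// Illustrative sketch only -- not part of OpenCV or of this project's sources.\n// cvconfig.h records which optional features a given OpenCV build was\n// compiled with, so client code can branch on the HAVE_* defines; the\n// include path is assumed to match this repository's layout.\n#include <opencv2/cvconfig.h>\n#include <iostream>\n\nint main()\n{\n#ifdef HAVE_OPENCL\n    std::cout << \"built with OpenCL support\" << std::endl;\n#else\n    std::cout << \"built without OpenCL support\" << std::endl;\n#endif\n#ifdef HAVE_FFMPEG\n    std::cout << \"built with FFmpeg video I/O\" << std::endl;\n#endif\n    return 0;\n}\n"
  },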
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/ar_hmdb.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_AR_HMDB_HPP\n#define OPENCV_DATASETS_AR_HMDB_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_ar\n//! @{\n\nstruct AR_hmdbObj : public Object\n{\n    int id;\n    std::string name;\n    std::string videoName;\n};\n\nclass CV_EXPORTS AR_hmdb : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<AR_hmdb> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/ar_sports.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_AR_SPORTS_HPP\n#define OPENCV_DATASETS_AR_SPORTS_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_ar\n//! @{\n\nstruct AR_sportsObj : public Object\n{\n    std::string videoUrl;\n    std::vector<int> labels;\n};\n\nclass CV_EXPORTS AR_sports : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<AR_sports> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/dataset.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_DATASET_HPP\n#define OPENCV_DATASETS_DATASET_HPP\n\n#include <string>\n#include <vector>\n\n#include <opencv2/core.hpp>\n\n/** @defgroup datasets Framework for working with different datasets\n\nThe datasets module includes classes for working with different datasets: load data, evaluate\ndifferent algorithms on them, contains benchmarks, etc.\n\nIt is planned to have:\n\n-   basic: loading code for all datasets to help start work with them.\n-   next stage: quick benchmarks for all datasets to show how to solve them using OpenCV and\nimplement evaluation code.\n-   finally: implement on OpenCV state-of-the-art algorithms, which solve these tasks.\n\n@{\n@defgroup datasets_ar Action Recognition\n\n### HMDB: A Large Human Motion Database\n\nImplements loading dataset:\n\n\"HMDB: A Large Human Motion Database\": <http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>\n\nUsage:\n-# From link above download dataset files: `hmdb51_org.rar` & `test_train_splits.rar`.\n-# Unpack them. 
Unpack all archives in the directory `hmdb51_org/` and remove them.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_ar_hmdb -p=/home/user/path_to_unpacked_folders/\n~~~\n\n#### Benchmark\n\nA benchmark was implemented for this dataset, with accuracy 0.107407 (using the precomputed HOG/HOF\n\"STIP\" features from the site, averaged over 3 splits)\n\nTo run this benchmark execute:\n~~~\n./opencv/build/bin/example_datasets_ar_hmdb_benchmark -p=/home/user/path_to_unpacked_folders/\n~~~\n\n@note\nPrecomputed features should be unpacked in the same folder: `/home/user/path_to_unpacked_folders/hmdb51_org_stips/`.\nAlso unpack all archives in the directory `hmdb51_org_stips/` and remove them.\n\n### Sports-1M %Dataset\n\nImplements loading of the dataset:\n\n\"Sports-1M Dataset\": <http://cs.stanford.edu/people/karpathy/deepvideo/>\n\nUsage:\n-# From the link above, download the dataset files (`git clone https://code.google.com/p/sports-1m-dataset/`).\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_ar_sports -p=/home/user/path_to_downloaded_folders/\n~~~\n\n@defgroup datasets_fr Face Recognition\n\n### Adience\n\nImplements loading of the dataset:\n\n\"Adience\": <http://www.openu.ac.il/home/hassner/Adience/data.html>\n\nUsage:\n-# From the link above, download any dataset file: `faces.tar.gz\\\\aligned.tar.gz` and the files with splits:\n`fold_0_data.txt-fold_4_data.txt`, `fold_frontal_0_data.txt-fold_frontal_4_data.txt`. (For the\nface recognition task, different splits should be created.)\n-# Unpack the dataset file to some folder and place the split files into the same folder.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_fr_adience -p=/home/user/path_to_created_folder/\n~~~\n\n### Labeled Faces in the Wild\n\nImplements loading of the dataset:\n\n\"Labeled Faces in the Wild\": <http://vis-www.cs.umass.edu/lfw/>\n\nUsage:\n-# From the link above, download any dataset file:\n`lfw.tgz\\\\lfwa.tar.gz\\\\lfw-deepfunneled.tgz\\\\lfw-funneled.tgz` and the files with pairs: the 10 test\nsplits `pairs.txt` and the developer train split `pairsDevTrain.txt`.\n-# Unpack the dataset file and place `pairs.txt` and `pairsDevTrain.txt` in the created folder.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_fr_lfw -p=/home/user/path_to_unpacked_folder/lfw2/\n~~~\n\n#### Benchmark\n\nA benchmark was implemented for this dataset, with accuracy 0.623833 +- 0.005223 (train split:\n`pairsDevTrain.txt`, dataset: lfwa)\n\nTo run this benchmark execute:\n~~~\n./opencv/build/bin/example_datasets_fr_lfw_benchmark -p=/home/user/path_to_unpacked_folder/lfw2/\n~~~\n\n@defgroup datasets_gr Gesture Recognition\n\n### ChaLearn Looking at People\n\nImplements loading of the dataset:\n\n\"ChaLearn Looking at People\": <http://gesture.chalearn.org/>\n\nUsage:\n-# Follow the instructions from the site above to download the files for the dataset \"Track 3: Gesture Recognition\":\n`Train1.zip`-`Train5.zip`, `Validation1.zip`-`Validation3.zip` (Register on the site www.codalab.org and\naccept the terms and conditions of the competition:\n<https://www.codalab.org/competitions/991#learn_the_details> There are three mirrors for\ndownloading the dataset files. 
At the time of writing, only the \"Universitat Oberta de Catalunya\" mirror\nworked.)\n-# Unpack the train archives `Train1.zip`-`Train5.zip` into the folder `Train/`, and the validation archives\n`Validation1.zip`-`Validation3.zip` into the folder `Validation/`\n-# Unpack all archives in `Train/` & `Validation/` into folders with the same names, for example:\n`Sample0001.zip` to `Sample0001/`\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_gr_chalearn -p=/home/user/path_to_unpacked_folders/\n~~~\n\n### Sheffield Kinect Gesture Dataset\n\nImplements loading of the dataset:\n\n\"Sheffield Kinect Gesture Dataset\": <http://lshao.staff.shef.ac.uk/data/SheffieldKinectGesture.htm>\n\nUsage:\n-# From the link above, download the dataset files: `subject1_dep.7z`-`subject6_dep.7z`, `subject1_rgb.7z`-`subject6_rgb.7z`.\n-# Unpack them.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_gr_skig -p=/home/user/path_to_unpacked_folders/\n~~~\n\n@defgroup datasets_hpe Human Pose Estimation\n\n### HumanEva Dataset\n\nImplements loading of the dataset:\n\n\"HumanEva Dataset\": <http://humaneva.is.tue.mpg.de>\n\nUsage:\n-# From the link above, download the dataset files for `HumanEva-I` (tar) & `HumanEva-II`.\n-# Unpack them to `HumanEva_1` & `HumanEva_2` respectively.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_hpe_humaneva -p=/home/user/path_to_unpacked_folders/\n~~~\n\n### PARSE Dataset\n\nImplements loading of the dataset:\n\n\"PARSE Dataset\": <http://www.ics.uci.edu/~dramanan/papers/parse/>\n\nUsage:\n-# From the link above, download the dataset file: `people.zip`.\n-# Unpack it.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_hpe_parse -p=/home/user/path_to_unpacked_folder/people_all/\n~~~\n\n@defgroup datasets_ir Image Registration\n\n### Affine Covariant Regions Datasets\n\nImplements loading of the dataset:\n\n\"Affine Covariant Regions Datasets\": <http://www.robots.ox.ac.uk/~vgg/data/data-aff.html>\n\nUsage:\n-# From the link above, download the dataset files:\n`bark\\\\bikes\\\\boat\\\\graf\\\\leuven\\\\trees\\\\ubc\\\\wall.tar.gz`.\n-# Unpack them.\n-# To load data, for example, for \"bark\", run:\n~~~\n./opencv/build/bin/example_datasets_ir_affine -p=/home/user/path_to_unpacked_folder/bark/\n~~~\n\n### Robot Data Set\n\nImplements loading of the dataset:\n\n\"Robot Data Set, Point Feature Data Set – 2010\": <http://roboimagedata.compute.dtu.dk/?page_id=24>\n\nUsage:\n-# From the link above, download the dataset files: `SET001_6.tar.gz`-`SET055_60.tar.gz`\n-# Unpack them into one folder.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_ir_robot -p=/home/user/path_to_unpacked_folder/\n~~~\n\n@defgroup datasets_is Image Segmentation\n\n### The Berkeley Segmentation Dataset and Benchmark\n\nImplements loading of the dataset:\n\n\"The Berkeley Segmentation Dataset and Benchmark\": <https://www.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/>\n\nUsage:\n-# From the link above, download the dataset files: `BSDS300-human.tgz` & `BSDS300-images.tgz`.\n-# Unpack them.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_is_bsds -p=/home/user/path_to_unpacked_folder/BSDS300/\n~~~\n\n### Weizmann Segmentation Evaluation Database\n\nImplements loading of the dataset:\n\n\"Weizmann Segmentation Evaluation Database\": <http://www.wisdom.weizmann.ac.il/~vision/Seg_Evaluation_DB/>\n\nUsage:\n-# From the link above, download the dataset files: `Weizmann_Seg_DB_1obj.ZIP` & `Weizmann_Seg_DB_2obj.ZIP`.\n-# Unpack them.\n-# To load data, for example, for the `1 object` dataset, run:\n~~~\n./opencv/build/bin/example_datasets_is_weizmann 
-p=/home/user/path_to_unpacked_folder/1obj/\n~~~\n\n@defgroup datasets_msm Multiview Stereo Matching\n\n### EPFL Multi-View Stereo\n\nImplements loading of the dataset:\n\n\"EPFL Multi-View Stereo\": <http://cvlab.epfl.ch/data/strechamvs>\n\nUsage:\n-# From the link above, download the dataset files:\n`castle_dense\\\\castle_dense_large\\\\castle_entry\\\\fountain\\\\herzjesu_dense\\\\herzjesu_dense_large_bounding\\\\cameras\\\\images\\\\p.tar.gz`.\n-# Unpack them into a separate folder for each object. For example, for \"fountain\", in folder `fountain/` :\n`fountain_dense_bounding.tar.gz -> bounding/`,\n`fountain_dense_cameras.tar.gz -> camera/`,\n`fountain_dense_images.tar.gz -> png/`,\n`fountain_dense_p.tar.gz -> P/`\n-# To load data, for example, for \"fountain\", run:\n~~~\n./opencv/build/bin/example_datasets_msm_epfl -p=/home/user/path_to_unpacked_folder/fountain/\n~~~\n\n### Stereo – Middlebury Computer Vision\n\nImplements loading of the dataset:\n\n\"Stereo – Middlebury Computer Vision\": <http://vision.middlebury.edu/mview/>\n\nUsage:\n-# From the link above, download the dataset files:\n`dino\\\\dinoRing\\\\dinoSparseRing\\\\temple\\\\templeRing\\\\templeSparseRing.zip`\n-# Unpack them.\n-# To load data, for example, for the \"temple\" dataset, run:\n~~~\n./opencv/build/bin/example_datasets_msm_middlebury -p=/home/user/path_to_unpacked_folder/temple/\n~~~\n\n@defgroup datasets_or Object Recognition\n\n### ImageNet\n\nImplements loading of the dataset: \"ImageNet\": <http://www.image-net.org/>\n\nUsage:\n-# From the link above, download the dataset files:\n`ILSVRC2010_images_train.tar\\\\ILSVRC2010_images_test.tar\\\\ILSVRC2010_images_val.tar` & the devkit:\n`ILSVRC2010_devkit-1.0.tar.gz` (Loading of the 2010 dataset is implemented, as only this dataset has ground\ntruth for the test data; the structure of ILSVRC2014 is similar.)\n-# Unpack them to: `some_folder/train/`, `some_folder/test/`, `some_folder/val` &\n`some_folder/ILSVRC2010_validation_ground_truth.txt`,\n`some_folder/ILSVRC2010_test_ground_truth.txt`.\n-# Create a file with labels: `some_folder/labels.txt`, for example, using the Python script below (each\nrow has the format `synset,labelID,description`, for example: \"n07751451,18,plum\").\n-# Unpack all tar files in `train/`.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_or_imagenet -p=/home/user/some_folder/\n~~~\n\nPython script to parse `meta.mat`:\n~~~{py}\n    import scipy.io\n    meta_mat = scipy.io.loadmat(\"devkit-1.0/data/meta.mat\")\n\n    labels_dic = dict((m[0][1][0], m[0][0][0][0]-1) for m in meta_mat['synsets'])\n    label_names_dic = dict((m[0][1][0], m[0][2][0]) for m in meta_mat['synsets'])\n\n    for label in labels_dic.keys():\n        print \"{0},{1},{2}\".format(label, labels_dic[label], label_names_dic[label])\n~~~\n\n### MNIST\n\nImplements loading of the dataset:\n\n\"MNIST\": <http://yann.lecun.com/exdb/mnist/>\n\nUsage:\n-# From the link above, download the dataset files:\n`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`, `train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`.\n-# Unpack them.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_or_mnist -p=/home/user/path_to_unpacked_files/\n~~~\n\n### SUN Database\n\nImplements loading of the dataset:\n\n\"SUN Database, Scene Recognition Benchmark. 
SUN397\": <http://vision.cs.princeton.edu/projects/2010/SUN/>\n\nUsage:\n-# From link above download dataset file: `SUN397.tar` & file with splits: `Partitions.zip`\n-# Unpack `SUN397.tar` into folder: `SUN397/` & `Partitions.zip` into folder: `SUN397/Partitions/`\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_or_sun -p=/home/user/path_to_unpacked_files/SUN397/\n~~~\n\n@defgroup datasets_pd Pedestrian Detection\n\n### Caltech Pedestrian Detection Benchmark\n\nImplements loading dataset:\n\n\"Caltech Pedestrian Detection Benchmark\": <http://www.vision.caltech.edu/Image_Datasets/CaltechPedestrians/>\n\n@note First version of Caltech Pedestrian dataset loading. Code to unpack all frames from seq files\ncommented as their number is huge! So currently load only meta information without data. Also\nground truth isn't processed, as need to convert it from mat files first.\n\nUsage:\n-# From link above download dataset files: `set00.tar`-`set10.tar`.\n-# Unpack them to separate folder.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_pd_caltech -p=/home/user/path_to_unpacked_folders/\n~~~\n\n@defgroup datasets_slam SLAM\n\n### KITTI Vision Benchmark\n\nImplements loading dataset:\n\n\"KITTI Vision Benchmark\": <http://www.cvlibs.net/datasets/kitti/eval_odometry.php>\n\nUsage:\n-# From link above download \"Odometry\" dataset files:\n`data_odometry_gray\\data_odometry_color\\data_odometry_velodyne\\data_odometry_poses\\data_odometry_calib.zip`.\n-# Unpack `data_odometry_poses.zip`, it creates folder `dataset/poses/`. After that unpack\n`data_odometry_gray.zip`, `data_odometry_color.zip`, `data_odometry_velodyne.zip`. Folder\n`dataset/sequences/` will be created with folders `00/..21/`. Each of these folders will contain:\n`image_0/`, `image_1/`, `image_2/`, `image_3/`, `velodyne/` and files `calib.txt` & `times.txt`.\nThese two last files will be replaced after unpacking `data_odometry_calib.zip` at the end.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_slam_kitti -p=/home/user/path_to_unpacked_folder/dataset/\n~~~\n\n### TUMindoor Dataset\n\nImplements loading dataset:\n\n\"TUMindoor Dataset\": <http://www.navvis.lmt.ei.tum.de/dataset/>\n\nUsage:\n-# From link above download dataset files: `dslr\\info\\ladybug\\pointcloud.tar.bz2` for each dataset:\n`11-11-28 (1st floor)\\11-12-13 (1st floor N1)\\11-12-17a (4th floor)\\11-12-17b (3rd floor)\\11-12-17c (Ground I)\\11-12-18a (Ground II)\\11-12-18b (2nd floor)`\n-# Unpack them in separate folder for each dataset.\n`dslr.tar.bz2 -> dslr/`,\n`info.tar.bz2 -> info/`,\n`ladybug.tar.bz2 -> ladybug/`,\n`pointcloud.tar.bz2 -> pointcloud/`.\n-# To load each dataset run:\n~~~\n./opencv/build/bin/example_datasets_slam_tumindoor -p=/home/user/path_to_unpacked_folders/\n~~~\n\n@defgroup datasets_tr Text Recognition\n\n### The Chars74K Dataset\n\nImplements loading dataset:\n\n\"The Chars74K Dataset\": <http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/>\n\nUsage:\n-# From link above download dataset files:\n`EnglishFnt\\EnglishHnd\\EnglishImg\\KannadaHnd\\KannadaImg.tgz`, `ListsTXT.tgz`.\n-# Unpack them.\n-# Move `.m` files from folder `ListsTXT/` to appropriate folder. 
For example,\n`English/list_English_Img.m` for `EnglishImg.tgz`.\n-# To load data, for example, for \"EnglishImg\", run:\n~~~\n./opencv/build/bin/example_datasets_tr_chars -p=/home/user/path_to_unpacked_folder/English/\n~~~\n\n### The Street View Text Dataset\n\nImplements loading of the dataset:\n\n\"The Street View Text Dataset\": <http://vision.ucsd.edu/~kai/svt/>\n\nUsage:\n-# From the link above, download the dataset file: `svt.zip`.\n-# Unpack it.\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_tr_svt -p=/home/user/path_to_unpacked_folder/svt/svt1/\n~~~\n\n#### Benchmark\n\nA benchmark was implemented for this dataset, with accuracy (mean f1) 0.217\n\nTo run the benchmark execute:\n~~~\n./opencv/build/bin/example_datasets_tr_svt_benchmark -p=/home/user/path_to_unpacked_folders/svt/svt1/\n~~~\n\n@defgroup datasets_track Tracking\n\n### VOT 2015 Database\n\nImplements loading of the dataset:\n\n\"VOT 2015 dataset comprises 60 short sequences showing various objects in challenging backgrounds.\nThe sequences were chosen from a large pool of sequences including the ALOV dataset, OTB2 dataset,\nnon-tracking datasets, Computer Vision Online, Professor Bob Fisher’s Image Database, Videezy,\nCenter for Research in Computer Vision, University of Central Florida, USA, NYU Center for Genomics\nand Systems Biology, Data Wrangling, Open Access Directory and Learning and Recognition in Vision\nGroup, INRIA, France. The VOT sequence selection protocol was applied to obtain a representative\nset of challenging sequences.\": <http://box.vicos.si/vot/vot2015.zip>\n\nUsage:\n-# From the link above, download the dataset file: `vot2015.zip`\n-# Unpack `vot2015.zip` into the folder `VOT2015/`\n-# To load data run:\n~~~\n./opencv/build/bin/example_datasets_track_vot -p=/home/user/path_to_unpacked_files/VOT2015/\n~~~\n@}\n\n*/\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets\n//! @{\n\nstruct Object\n{\n};\n\nclass CV_EXPORTS Dataset\n{\npublic:\n    Dataset() {}\n    virtual ~Dataset() {}\n\n    virtual void load(const std::string &path) = 0;\n\n    std::vector< Ptr<Object> >& getTrain(int splitNum = 0);\n    std::vector< Ptr<Object> >& getTest(int splitNum = 0);\n    std::vector< Ptr<Object> >& getValidation(int splitNum = 0);\n\n    int getNumSplits() const;\n\nprotected:\n    std::vector< std::vector< Ptr<Object> > > train;\n    std::vector< std::vector< Ptr<Object> > > test;\n    std::vector< std::vector< Ptr<Object> > > validation;\n\nprivate:\n    std::vector< Ptr<Object> > empty;\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
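  {
    "path": "docs/examples/datasets_usage_sketch.cpp",
    "content": "// Illustrative sketch only -- not part of OpenCV or of this project's sources.\n// It shows the usage pattern shared by the datasets module declared in\n// dataset.hpp: create a loader, load() the unpacked data, then read a split\n// through the Dataset base class. The dataset path is a placeholder.\n#include <opencv2/datasets/ar_hmdb.hpp>\n#include <iostream>\n\nusing namespace cv::datasets;\n\nint main()\n{\n    cv::Ptr<AR_hmdb> dataset = AR_hmdb::create();\n    dataset->load(\"/home/user/path_to_unpacked_folders/\"); // placeholder path\n\n    // Splits are exposed by the Dataset base class; split 0 is the default.\n    std::vector< cv::Ptr<Object> > &train = dataset->getTrain();\n    std::cout << \"splits: \" << dataset->getNumSplits()\n              << \", train samples: \" << train.size() << std::endl;\n\n    if (!train.empty())\n    {\n        // Each dataset defines its own Object subclass with the sample fields.\n        AR_hmdbObj *obj = static_cast<AR_hmdbObj *>(train[0].get());\n        std::cout << obj->name << \" / \" << obj->videoName << std::endl;\n    }\n    return 0;\n}\n"
  },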
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/fr_adience.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_FR_ADIENCE_HPP\n#define OPENCV_DATASETS_FR_ADIENCE_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_fr\n//! @{\n\nenum genderType\n{\n    male = 0,\n    female,\n    none\n};\n\nstruct FR_adienceObj : public Object\n{\n    std::string user_id;\n    std::string original_image;\n    int face_id;\n    std::string age;\n    genderType gender;\n    int x;\n    int y;\n    int dx;\n    int dy;\n    int tilt_ang;\n    int fiducial_yaw_angle;\n    int fiducial_score;\n};\n\nclass CV_EXPORTS FR_adience : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<FR_adience> create();\n\n    std::vector<std::string> paths;\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/fr_lfw.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_FR_LFW_HPP\n#define OPENCV_DATASETS_FR_LFW_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_fr\n//! @{\n\nstruct FR_lfwObj : public Object\n{\n    std::string image1, image2;\n    bool same;\n};\n\nclass CV_EXPORTS FR_lfw : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<FR_lfw> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/gr_chalearn.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_GR_CHALEARN_HPP\n#define OPENCV_DATASETS_GR_CHALEARN_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_gr\n//! @{\n\nstruct groundTruth\n{\n    int gestureID, initialFrame, lastFrame;\n};\n\nstruct join\n{\n    double Wx, Wy, Wz, Rx, Ry, Rz, Rw, Px, Py;\n};\n\nstruct skeleton\n{\n    join s[20];\n};\n\nstruct GR_chalearnObj : public Object\n{\n    std::string name, nameColor, nameDepth, nameUser;\n    int numFrames, fps, depth;\n    std::vector<groundTruth> groundTruths;\n    std::vector<skeleton> skeletons;\n};\n\nclass CV_EXPORTS GR_chalearn : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<GR_chalearn> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/gr_skig.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_GR_SKIG_HPP\n#define OPENCV_DATASETS_GR_SKIG_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_gr\n//! @{\n\nenum actionType\n{\n    circle = 1,\n    triangle,\n    updown,\n    rightleft,\n    wave,\n    z,\n    cross,\n    comehere,\n    turnaround,\n    pat\n};\n\nenum poseType\n{\n    fist = 1,\n    index,\n    flat\n};\n\nenum illuminationType\n{\n    light = 1,\n    dark\n};\n\nenum backgroundType\n{\n    woodenBoard = 1,\n    whitePaper,\n    paperWithCharacters\n};\n\nstruct GR_skigObj : public Object\n{\n    std::string rgb;\n    std::string dep;\n    char person; // 1..6\n    backgroundType background;\n    illuminationType illumination;\n    poseType pose;\n    actionType type;\n};\n\nclass CV_EXPORTS GR_skig : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<GR_skig> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/hpe_humaneva.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_HPE_HUMANEVA_HPP\n#define OPENCV_DATASETS_HPE_HUMANEVA_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_hpe\n//! @{\n\nstruct HPE_humanevaObj : public Object\n{\n    char person; // 1..4\n    std::string action;\n    int type1;\n    std::string type2;\n    Matx13d ofs;\n    std::string fileName;\n    std::vector<std::string> imageNames; // for HumanEva_II\n};\n\nenum datasetType\n{\n    humaneva_1 = 1,\n    humaneva_2\n};\n\nclass CV_EXPORTS HPE_humaneva : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<HPE_humaneva> create(int num=humaneva_1);\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/hpe_parse.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_HPE_PARSE_HPP\n#define OPENCV_DATASETS_HPE_PARSE_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_hpe\n//! @{\n\nstruct HPE_parseObj : public Object\n{\n    std::string name;\n};\n\nclass CV_EXPORTS HPE_parse : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<HPE_parse> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/ir_affine.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_IR_AFFINE_HPP\n#define OPENCV_DATASETS_IR_AFFINE_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n#include <opencv2/core/matx.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_ir\n//! @{\n\nstruct IR_affineObj : public Object\n{\n    std::string imageName;\n    Matx33d mat;\n};\n\nclass CV_EXPORTS IR_affine : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<IR_affine> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/ir_robot.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_IR_ROBOT_HPP\n#define OPENCV_DATASETS_IR_ROBOT_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_ir\n//! @{\n\n// calibration matrix from calibrationFile.mat\n// 2.8290e+03   0.0000e+00   8.0279e+02\n// 0.0000e+00   2.8285e+03   6.1618e+02\n// 0.0000e+00   0.0000e+00   1.0000e+00\n\nstruct cameraPos\n{\n    std::vector<std::string> images;\n};\n\nstruct IR_robotObj : public Object\n{\n    std::string name;\n    std::vector<cameraPos> pos;\n};\n\nclass CV_EXPORTS IR_robot : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<IR_robot> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
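  {
    "path": "docs/examples/ir_robot_projection_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch showing how the calibration matrix quoted in the ir_robot.hpp\n// comment could be used with cv::Matx33d to project a camera-space point.\n// The 3D point below is made up for illustration.\n#include <opencv2/core.hpp>\n#include <cstdio>\n\nint main()\n{\n    // Intrinsics as listed in ir_robot.hpp (from calibrationFile.mat)\n    cv::Matx33d K(2.8290e+03, 0.0,        8.0279e+02,\n                  0.0,        2.8285e+03, 6.1618e+02,\n                  0.0,        0.0,        1.0);\n\n    cv::Vec3d X(0.1, -0.05, 2.0); // hypothetical point in metres\n    cv::Vec3d x = K * X;          // homogeneous pixel coordinates\n    std::printf(\"pixel: (%.1f, %.1f)\\n\", x[0] / x[2], x[1] / x[2]);\n    return 0;\n}\n"
  },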
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/is_bsds.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_IS_BSDS_HPP\n#define OPENCV_DATASETS_IS_BSDS_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_is\n//! @{\n\nstruct IS_bsdsObj : public Object\n{\n    std::string name;\n};\n\nclass CV_EXPORTS IS_bsds : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<IS_bsds> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/is_weizmann.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_IS_WEIZMANN_HPP\n#define OPENCV_DATASETS_IS_WEIZMANN_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_is\n//! @{\n\nstruct IS_weizmannObj : public Object\n{\n    std::string imageName;\n    std::string srcBw;\n    std::string srcColor;\n    std::string humanSeg; // TODO: read human segmented\n};\n\nclass CV_EXPORTS IS_weizmann : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<IS_weizmann> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/msm_epfl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_MSM_EPFL_HPP\n#define OPENCV_DATASETS_MSM_EPFL_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_msm\n//! @{\n\nstruct cameraParam\n{\n    Matx33d mat1;\n    double mat2[3];\n    Matx33d mat3;\n    double mat4[3];\n    int imageWidth, imageHeight;\n};\n\nstruct MSM_epflObj : public Object\n{\n    std::string imageName;\n    Matx23d bounding;\n    Matx34d p;\n    cameraParam camera;\n};\n\nclass CV_EXPORTS MSM_epfl : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<MSM_epfl> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/msm_middlebury.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_MSM_MIDDLEBURY_HPP\n#define OPENCV_DATASETS_MSM_MIDDLEBURY_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_msm\n//! @{\n\nstruct MSM_middleburyObj : public Object\n{\n    std::string imageName;\n    Matx33d k;\n    Matx33d r;\n    double t[3];\n};\n\nclass CV_EXPORTS MSM_middlebury : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<MSM_middlebury> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
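  {
    "path": "docs/examples/msm_middlebury_projection_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch of how the k, r, t fields declared in msm_middlebury.hpp combine\n// into a 3x4 projection matrix P = K * [R | t]. The values below are\n// dummies; real ones would come from a loaded MSM_middleburyObj.\n#include <opencv2/core.hpp>\n\nstatic cv::Matx34d projectionFrom(const cv::Matx33d &K, const cv::Matx33d &R, const double t[3])\n{\n    cv::Matx34d Rt; // assemble [R | t]\n    for (int i = 0; i < 3; ++i)\n    {\n        for (int j = 0; j < 3; ++j)\n            Rt(i, j) = R(i, j);\n        Rt(i, 3) = t[i];\n    }\n    return K * Rt;\n}\n\nint main()\n{\n    const double t[3] = {0.0, 0.0, 0.0};\n    cv::Matx34d P = projectionFrom(cv::Matx33d::eye(), cv::Matx33d::eye(), t);\n    return P(0, 0) == 1.0 ? 0 : 1; // sanity check for the identity setup\n}\n"
  },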
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/or_imagenet.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_OR_IMAGENET_HPP\n#define OPENCV_DATASETS_OR_IMAGENET_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_or\n//! @{\n\nstruct OR_imagenetObj : public Object\n{\n    int id;\n    std::string image;\n};\n\nclass CV_EXPORTS OR_imagenet : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<OR_imagenet> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/or_mnist.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_OR_MNIST_HPP\n#define OPENCV_DATASETS_OR_MNIST_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_or\n//! @{\n\nstruct OR_mnistObj : public Object\n{\n    char label; // 0..9\n    Mat image; // [28][28]\n};\n\nclass CV_EXPORTS OR_mnist : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<OR_mnist> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
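  {
    "path": "docs/examples/or_mnist_read_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch of reading one sample through the OR_mnist interface declared in\n// or_mnist.hpp. It assumes the Dataset base class exposes getTrain()\n// returning a vector of Ptr<Object>, as in upstream OpenCV's datasets\n// module; the dataset path is a placeholder.\n#include <opencv2/datasets/or_mnist.hpp>\n#include <cstdio>\n\nusing namespace cv::datasets;\n\nint main()\n{\n    cv::Ptr<OR_mnist> ds = OR_mnist::create();\n    ds->load(\"/path/to/mnist/\"); // placeholder\n\n    if (!ds->getTrain().empty())\n    {\n        const OR_mnistObj *s = static_cast<const OR_mnistObj *>(ds->getTrain()[0].get());\n        // label is 0..9 per the header comment; image is a 28x28 Mat\n        std::printf(\"label=%d, image %dx%d\\n\", (int)s->label, s->image.cols, s->image.rows);\n    }\n    return 0;\n}\n"
  },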
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/or_pascal.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_VOC_PASCAL_HPP\n#define OPENCV_DATASETS_VOC_PASCAL_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_or\n//! @{\nstruct PascalPart: public Object\n{\n    std::string name;\n    int xmin;\n    int ymin;\n    int xmax;\n    int ymax;\n};\n\nstruct PascalObj: public PascalPart\n{\n    std::string pose;\n    bool truncated;\n    bool difficult;\n    bool occluded;\n\n    std::vector<PascalPart> parts;\n};\n\nstruct OR_pascalObj : public Object\n{\n    std::string filename;\n\n    int width;\n    int height;\n    int depth;\n\n    std::vector<PascalObj> objects;\n};\n\nclass CV_EXPORTS OR_pascal : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<OR_pascal> create();\n};\n\n//! @}\n\n}// namespace dataset\n}// namespace cv\n\n#endif\n"
  },
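  {
    "path": "docs/examples/or_pascal_walk_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch walking the nested annotation layout declared in or_pascal.hpp\n// (OR_pascalObj -> PascalObj -> PascalPart). It assumes Dataset::getTrain()\n// as in upstream OpenCV's datasets module; the path is a placeholder.\n#include <opencv2/datasets/or_pascal.hpp>\n#include <cstdio>\n\nusing namespace cv::datasets;\n\nint main()\n{\n    cv::Ptr<OR_pascal> ds = OR_pascal::create();\n    ds->load(\"/path/to/VOC/\"); // placeholder\n\n    for (size_t i = 0; i < ds->getTrain().size(); ++i)\n    {\n        const OR_pascalObj *ann = static_cast<const OR_pascalObj *>(ds->getTrain()[i].get());\n        for (size_t j = 0; j < ann->objects.size(); ++j)\n        {\n            const PascalObj &po = ann->objects[j];\n            std::printf(\"%s: %d part boxes\\n\", po.name.c_str(), (int)po.parts.size());\n        }\n    }\n    return 0;\n}\n"
  },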
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/or_sun.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_OR_SUN_HPP\n#define OPENCV_DATASETS_OR_SUN_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_or\n//! @{\n\nstruct OR_sunObj : public Object\n{\n    int label;\n    std::string name;\n};\n\nclass CV_EXPORTS OR_sun : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<OR_sun> create();\n\n    std::vector<std::string> paths;\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/pd_caltech.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_PD_CALTECH_HPP\n#define OPENCV_DATASETS_PD_CALTECH_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_pd\n//! @{\n\nstruct PD_caltechObj : public Object\n{\n    //double groundTrue[][];\n    //Mat image;\n    std::string name;\n    std::vector< std::string > imageNames;\n};\n\n//\n// first version of Caltech Pedestrian dataset loading\n// code to unpack all frames from seq files commented as their number is huge\n// so currently load only meta information without data\n//\n// also ground truth isn't processed, as need to convert it from mat files first\n//\n\nclass CV_EXPORTS PD_caltech : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<PD_caltech> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/pd_inria.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_PD_INRIA_HPP\n#define OPENCV_DATASETS_PD_INRIA_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_pd\n//! @{\n\nenum sampleType \n{\n    POS = 0,\n    NEG = 1\n};\n\nstruct PD_inriaObj : public Object\n{\n    // image file name\n    std::string filename;\n    \n    // positive or negative\n    sampleType sType;\n\n    // image size\n    int width;\n    int height;\n    int depth;\n\n    // bounding boxes\n    std::vector< Rect > bndboxes;\n};\n\nclass CV_EXPORTS PD_inria : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<PD_inria> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
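  {
    "path": "docs/examples/pd_inria_boxes_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch counting positive samples and pedestrian boxes via the fields\n// declared in pd_inria.hpp. It assumes Dataset::getTrain() as in upstream\n// OpenCV's datasets module; the path is a placeholder.\n#include <opencv2/datasets/pd_inria.hpp>\n#include <cstdio>\n\nusing namespace cv::datasets;\n\nint main()\n{\n    cv::Ptr<PD_inria> ds = PD_inria::create();\n    ds->load(\"/path/to/INRIAPerson/\"); // placeholder\n\n    int positives = 0, boxes = 0;\n    for (size_t i = 0; i < ds->getTrain().size(); ++i)\n    {\n        const PD_inriaObj *s = static_cast<const PD_inriaObj *>(ds->getTrain()[i].get());\n        if (s->sType == POS)\n        {\n            ++positives;\n            boxes += (int)s->bndboxes.size();\n        }\n    }\n    std::printf(\"%d positive images, %d boxes\\n\", positives, boxes);\n    return 0;\n}\n"
  },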
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/slam_kitti.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_SLAM_KITTI_HPP\n#define OPENCV_DATASETS_SLAM_KITTI_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_slam\n//! @{\n\nstruct pose\n{\n    double elem[12];\n};\n\nstruct SLAM_kittiObj : public Object\n{\n    std::string name;\n    std::vector<std::string> images[4];\n    std::vector<std::string> velodyne;\n    std::vector<double> times, p[4];\n    std::vector<pose> posesArray;\n};\n\nclass CV_EXPORTS SLAM_kitti : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<SLAM_kitti> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
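  {
    "path": "docs/examples/slam_kitti_pose_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch reshaping the 12-element pose row declared in slam_kitti.hpp into\n// a 3x4 [R | t] matrix, matching the row-major KITTI text format. The pose\n// below is a dummy identity transform.\n#include <opencv2/core.hpp>\n\nstruct pose { double elem[12]; }; // mirrors the struct in slam_kitti.hpp\n\nstatic cv::Matx34d toMatrix(const pose &p)\n{\n    return cv::Matx34d(p.elem); // Matx fills row by row from a plain array\n}\n\nint main()\n{\n    const pose p = {{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0}};\n    cv::Matx34d T = toMatrix(p);\n    return T(2, 2) == 1.0 ? 0 : 1;\n}\n"
  },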
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/slam_tumindoor.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_SLAM_TUMINDOOR_HPP\n#define OPENCV_DATASETS_SLAM_TUMINDOOR_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_slam\n//! @{\n\nenum imageType\n{\n    LEFT = 0,\n    RIGHT,\n    LADYBUG\n};\n\nstruct SLAM_tumindoorObj : public Object\n{\n    std::string name;\n    Matx44d transformMat;\n    imageType type;\n};\n\nclass CV_EXPORTS SLAM_tumindoor : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<SLAM_tumindoor> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/tr_chars.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_TR_CHARS_HPP\n#define OPENCV_DATASETS_TR_CHARS_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_tr\n//! @{\n\nstruct TR_charsObj : public Object\n{\n    std::string imgName;\n    int label;\n};\n\nclass CV_EXPORTS TR_chars : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<TR_chars> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/tr_icdar.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_TR_ICDAR_HPP\n#define OPENCV_DATASETS_TR_ICDAR_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_tr\n//! @{\n\nstruct word\n{\n    std::string value;\n    int height, width, x, y;\n};\n\nstruct TR_icdarObj : public Object\n{\n    std::string fileName;\n    std::vector<std::string> lex100;\n    std::vector<std::string> lexFull;\n    std::vector<word> words;\n};\n\nclass CV_EXPORTS TR_icdar : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<TR_icdar> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/tr_svt.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_TR_SVT_HPP\n#define OPENCV_DATASETS_TR_SVT_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_tr\n//! @{\n\nstruct tag\n{\n    std::string value;\n    int height, width, x, y;\n};\n\nstruct TR_svtObj : public Object\n{\n    std::string fileName;\n    std::vector<std::string> lex;\n    std::vector<tag> tags;\n};\n\nclass CV_EXPORTS TR_svt : public Dataset\n{\npublic:\n    virtual void load(const std::string &path) = 0;\n\n    static Ptr<TR_svt> create();\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/track_vot.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_TRACK_VOT_HPP\n#define OPENCV_DATASETS_TRACK_VOT_HPP\n\n#include <string>\n#include <vector>\n\n#include \"opencv2/datasets/dataset.hpp\"\n#include \"opencv2/datasets/util.hpp\"\n\nusing namespace std;\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets_track\n//! @{\n\nstruct TRACK_votObj : public Object\n{\n    int id;\n    std::string imagePath;\n    vector <Point2d> gtbb;\n};\n\nclass CV_EXPORTS TRACK_vot : public Dataset\n{\npublic:\n    static Ptr<TRACK_vot> create();\n\n    virtual void load(const std::string &path) = 0;\n\n    virtual int getDatasetsNum() = 0;\n\n    virtual int getDatasetLength(int id) = 0;\n\n    virtual bool initDataset(int id) = 0;\n\n    virtual bool getNextFrame(Mat &frame) = 0;\n\n    virtual vector <Point2d> getGT() = 0;\n\nprotected:\n    vector <vector <Ptr<TRACK_votObj> > > data;\n    int activeDatasetID;\n    int frameCounter;\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
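  {
    "path": "docs/examples/track_vot_loop_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch driving the interface declared in track_vot.hpp (initDataset /\n// getNextFrame / getGT). The dataset path is a placeholder and the sequence\n// id is assumed to be 1-based.\n#include <opencv2/datasets/track_vot.hpp>\n#include <cstdio>\n\nusing namespace cv::datasets;\n\nint main()\n{\n    cv::Ptr<TRACK_vot> ds = TRACK_vot::create();\n    ds->load(\"/path/to/VOT/\"); // placeholder\n    std::printf(\"sequences: %d\\n\", ds->getDatasetsNum());\n\n    if (ds->getDatasetsNum() > 0 && ds->initDataset(1))\n    {\n        cv::Mat frame;\n        while (ds->getNextFrame(frame))\n        {\n            std::vector<cv::Point2d> gt = ds->getGT();\n            (void)gt; // the annotated bounding polygon for the current frame\n        }\n    }\n    return 0;\n}\n"
  },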
  {
    "path": "src/3rdparty/opencv/include/opencv2/datasets/util.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_DATASETS_UTIL_HPP\n#define OPENCV_DATASETS_UTIL_HPP\n\n#include <string>\n#include <vector>\n\n#include <cstdio>\n#include <cstdlib> // atoi, atof\n\n#include <fstream>\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace datasets\n{\n\n//! @addtogroup datasets\n//! @{\n\nvoid CV_EXPORTS split(const std::string &s, std::vector<std::string> &elems, char delim);\n\nvoid CV_EXPORTS createDirectory(const std::string &path);\n\nvoid CV_EXPORTS getDirList(const std::string &dirName, std::vector<std::string> &fileNames);\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
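  {
    "path": "docs/examples/datasets_util_sketch.cpp",
    "content": "// Hypothetical example file, not part of the original project: a minimal\n// sketch exercising the helpers declared in util.hpp. The split() and\n// getDirList() signatures are taken from that header; the directory name\n// is a placeholder.\n#include <opencv2/datasets/util.hpp>\n#include <cstdio>\n#include <string>\n#include <vector>\n\nint main()\n{\n    std::vector<std::string> elems;\n    cv::datasets::split(\"12,34,56\", elems, ','); // -> {\"12\", \"34\", \"56\"}\n    std::printf(\"%d fields, first=%s\\n\", (int)elems.size(), elems[0].c_str());\n\n    std::vector<std::string> files;\n    cv::datasets::getDirList(\"/path/to/dir/\", files); // placeholder path\n    return 0;\n}\n"
  },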
  {
    "path": "src/3rdparty/opencv/include/opencv2/dnn/blob.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DNN_DNN_BLOB_HPP__\n#define __OPENCV_DNN_DNN_BLOB_HPP__\n#include <opencv2/core.hpp>\n#include <vector>\n#include <ostream>\n\nnamespace cv\n{\nnamespace dnn\n{\n//! @addtogroup dnn\n//! @{\n\n    /** @brief Lightweight class for storing and processing a shape of blob (or anything else). */\n    struct BlobShape\n    {\n        explicit BlobShape(int ndims = 4, int fill = 1);    //!< Creates n-dim shape and fill its by @p fill\n        BlobShape(int num, int cn, int rows, int cols);     //!< Creates 4-dim shape [@p num, @p cn, @p rows, @p cols]\n        BlobShape(int ndims, const int *sizes);             //!< Creates n-dim shape from the @p sizes array\n        BlobShape(const std::vector<int> &sizes);           //!< Creates n-dim shape from the @p sizes vector\n        template<int n>\n        BlobShape(const Vec<int, n> &shape);                //!< Creates n-dim shape from @ref cv::Vec\n\n        /** @brief Returns number of dimensions. */\n        int dims() const;\n\n        /** @brief Returns reference to the size of the specified @p axis.\n         *\n         * Negative @p axis is supported, in this case a counting starts from the last axis,\n         * i. e. 
-1 corresponds to last axis.\n         * If non-existing axis was passed then an error will be generated.\n         */\n        int &size(int axis);\n\n        /** @brief Returns the size of the specified @p axis.\n         *  @see size()\n         */\n        int size(int axis) const;\n\n        int operator[](int axis) const; //!< Does the same thing as size(axis).\n        int &operator[](int axis);      //!< Does the same thing as size(int) const.\n\n        /** @brief Returns the size of the specified @p axis.\n         *\n         * Does the same thing as size(int) const, but if non-existing axis will be passed then 1 will be returned,\n         * therefore this function always finishes successfully.\n         */\n        int xsize(int axis) const;\n\n        /** @brief Returns the product of all sizes of axes. */\n        ptrdiff_t total();\n\n        /** @brief Returns pointer to the first element of continuous size array. */\n        const int *ptr() const;\n\n        /** @brief Checks equality of two shapes. */\n        bool equal(const BlobShape &other) const;\n\n        bool operator== (const BlobShape &r) const;\n\n    private:\n        cv::AutoBuffer<int,4> sz;\n    };\n\n\n    /** @brief This class provides methods for continuous n-dimensional CPU and GPU array processing.\n     *\n     * The class is realized as a wrapper over @ref cv::Mat and @ref cv::UMat.\n     * It will support methods for switching and logical synchronization between CPU and GPU.\n    */\n    class CV_EXPORTS Blob\n    {\n    public:\n        explicit Blob();\n\n        /** @brief Constructs blob with specified @p shape and @p type. */\n        explicit Blob(const BlobShape &shape, int type = CV_32F);\n\n        /** @brief Constucts 4-dimensional blob (so-called batch) from image or array of images.\n         * @param image 2-dimensional multi-channel or 3-dimensional single-channel image (or array of images)\n         * @param dstCn specify size of second axis of ouptut blob\n        */\n        explicit Blob(InputArray image, int dstCn = -1);\n\n        /** @brief Creates blob with specified @p shape and @p type. */\n        void create(const BlobShape &shape, int type = CV_32F);\n\n        /** @brief Creates blob from cv::Mat or cv::UMat without copying the data */\n        void fill(InputArray in);\n        /** @brief Creates blob from user data.\n         *  @details If @p deepCopy is false then CPU data will not be allocated.\n         */\n        void fill(const BlobShape &shape, int type, void *data, bool deepCopy = true);\n\n        Mat& matRef();                      //!< Returns reference to cv::Mat, containing blob data.\n        const Mat& matRefConst() const;     //!< Returns reference to cv::Mat, containing blob data, for read-only purposes.\n        UMat &umatRef();                    //!< Returns reference to cv::UMat, containing blob data (not implemented yet).\n        const UMat &umatRefConst() const;   //!< Returns reference to cv::UMat, containing blob data, for read-only purposes (not implemented yet).\n\n        /** @brief Returns number of blob dimensions. */\n        int dims() const;\n\n        /** @brief Returns the size of the specified @p axis.\n         *\n         * Negative @p axis is supported, in this case a counting starts from the last axis,\n         * i. e. 
-1 corresponds to last axis.\n         * If a non-existing axis is passed then an error is generated.\n         */\n        int size(int axis) const;\n\n        /** @brief Returns the size of the specified @p axis.\n         *\n         * Does the same thing as size(int) const, but if a non-existing axis is passed then 1 is returned,\n         * so this function always finishes successfully.\n         */\n        int xsize(int axis) const;\n\n        /** @brief Computes the product of sizes of axes among the specified axes range [@p startAxis; @p endAxis).\n         * @param startAxis the first axis to include in the range.\n         * @param endAxis   the first axis to exclude from the range.\n         * @details Negative axis indexing can be used.\n         */\n        size_t total(int startAxis = 0, int endAxis = INT_MAX) const;\n\n        /** @brief Converts @p axis index to canonical format (where 0 <= axis < dims()). */\n        int canonicalAxis(int axis) const;\n\n        /** @brief Returns shape of the blob. */\n        BlobShape shape() const;\n\n        /** @brief Checks equality of the shapes of two blobs. */\n        bool equalShape(const Blob &other) const;\n\n        /** @brief Returns slice of first two dimensions.\n         *  @details The behaviour is similar to the following numpy code: blob[n, cn, ...]\n         */\n        Mat getPlane(int n, int cn);\n\n        /* Shape getters of 4-dimensional blobs. */\n        int cols() const;       //!< Returns the size of the fourth blob axis.\n        int rows() const;       //!< Returns the size of the third  blob axis.\n        int channels() const;   //!< Returns the size of the second blob axis.\n        int num() const;        //!< Returns the size of the first  blob axis.\n        Size size2() const;     //!< Returns cv::Size(cols(), rows())\n        Vec4i shape4() const;   //!< Returns shape of first four blob axes.\n\n        /** @brief Returns linear index of the element with specified coordinates in the blob.\n         *\n         * If @p n < dims() then unspecified coordinates will be filled by zeros.\n         * If @p n > dims() then extra coordinates will be ignored.\n         */\n        template<int n>\n        size_t offset(const Vec<int, n> &pos) const;\n        /** @overload */\n        size_t offset(int n = 0, int cn = 0, int row = 0, int col = 0) const;\n\n        /* CPU pointer getters */\n        /** @brief Returns pointer to the blob element with the specified position, stored in CPU memory.\n         *\n         * @p n corresponds to the first axis, @p cn - to the second, etc.\n         * If dims() > 4 then unspecified coordinates will be filled by zeros.\n         * If dims() < 4 then extra coordinates will be ignored.\n         */\n        uchar *ptr(int n = 0, int cn = 0, int row = 0, int col = 0);\n        /** @overload */\n        template<typename TFloat>\n        TFloat *ptr(int n = 0, int cn = 0, int row = 0, int col = 0);\n        /** @overload ptr<float>() */\n        float *ptrf(int n = 0, int cn = 0, int row = 0, int col = 0);\n        //TODO: add const ptr methods\n\n        /** @brief Shares data from another @p blob.\n         * @returns *this\n         */\n        Blob &shareFrom(const Blob &blob);\n\n        /** @brief Changes shape of the blob without copying the data.\n         * @returns *this\n         */\n        Blob &reshape(const BlobShape &shape);\n\n        /** @brief Returns type of the blob. 
*/\n        int type() const;\n\n    private:\n        const int *sizes() const;\n\n        Mat m;\n    };\n\n//! @}\n}\n}\n\n#include \"blob.inl.hpp\"\n\n#endif\n"
  },
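  {
    "path": "docs/examples/dnn_blob_usage_sketch.cpp",
    "content": "// Hypothetical usage sketch (not part of upstream OpenCV or this project's build):\n// shows how the BlobShape and Blob interfaces declared in blob.hpp above fit together.\n// The shape values and this file path are illustrative assumptions; only calls\n// declared in the header are used.\n#include <opencv2/dnn.hpp>\n#include <iostream>\n\nvoid blobShapeDemo()\n{\n    using namespace cv::dnn;\n\n    // 4-dim shape [num, channels, rows, cols], the layout most layers expect.\n    BlobShape shape(1, 3, 224, 224);\n    std::cout << shape.dims()   << std::endl; // 4\n    std::cout << shape[-1]      << std::endl; // 224: negative axes count from the end\n    std::cout << shape.total()  << std::endl; // 1*3*224*224 = 150528\n    std::cout << shape.xsize(5) << std::endl; // 1: xsize() never fails on bad axes\n\n    // A blob allocated with that shape; CV_32F is the default element type.\n    Blob blob(shape, CV_32F);\n    std::cout << blob.num() << \"x\" << blob.channels() << \"x\"\n              << blob.rows() << \"x\" << blob.cols() << std::endl; // 1x3x224x224\n}\n"
  },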
  {
    "path": "src/3rdparty/opencv/include/opencv2/dnn/blob.inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DNN_DNN_BLOB_INL_HPP__\n#define __OPENCV_DNN_DNN_BLOB_INL_HPP__\n#include \"blob.hpp\"\n\nnamespace cv\n{\nnamespace dnn\n{\n\ninline BlobShape::BlobShape(int ndims, int fill) : sz( (size_t)std::max(ndims, 0) )\n{\n    CV_Assert(ndims >= 0);\n    for (int i = 0; i < ndims; i++)\n        sz[i] = fill;\n}\n\ninline BlobShape::BlobShape(int ndims, const int *sizes) : sz( (size_t)std::max(ndims, 0) )\n{\n    CV_Assert(ndims >= 0);\n    for (int i = 0; i < ndims; i++)\n        sz[i] = sizes[i];\n}\n\ninline BlobShape::BlobShape(int num, int cn, int rows, int cols) : sz(4)\n{\n    sz[0] = num;\n    sz[1] = cn;\n    sz[2] = rows;\n    sz[3] = cols;\n}\n\ninline BlobShape::BlobShape(const std::vector<int> &sizes) : sz( sizes.size() )\n{\n    for (int i = 0; i < (int)sizes.size(); i++)\n        sz[i] = sizes[i];\n}\n\ntemplate<int n>\ninline BlobShape::BlobShape(const Vec<int, n> &shape) : sz(n)\n{\n    for (int i = 0; i < n; i++)\n        sz[i] = shape[i];\n}\n\ninline int BlobShape::dims() const\n{\n    return (int)sz.size();\n}\n\ninline int BlobShape::xsize(int axis) const\n{\n    if (axis < -dims() || axis >= dims())\n        return 1;\n\n    return sz[(axis < 0) ? axis + dims() : axis];\n}\n\ninline int BlobShape::size(int axis) const\n{\n    CV_Assert(-dims() <= axis && axis < dims());\n    return sz[(axis < 0) ? 
axis + dims() : axis];\n}\n\ninline int &BlobShape::size(int axis)\n{\n    CV_Assert(-dims() <= axis && axis < dims());\n    return sz[(axis < 0) ? axis + dims() : axis];\n}\n\ninline int BlobShape::operator[] (int axis) const\n{\n    CV_Assert(-dims() <= axis && axis < dims());\n    return sz[(axis < 0) ? axis + dims() : axis];\n}\n\ninline int &BlobShape::operator[] (int axis)\n{\n    CV_Assert(-dims() <= axis && axis < dims());\n    return sz[(axis < 0) ? axis + dims() : axis];\n}\n\ninline ptrdiff_t BlobShape::total()\n{\n    if (dims() == 0)\n        return 0;\n\n    ptrdiff_t res = 1;\n    for (int i = 0; i < dims(); i++)\n        res *= sz[i];\n    return res;\n}\n\ninline const int *BlobShape::ptr() const\n{\n    return sz;\n}\n\ninline bool BlobShape::equal(const BlobShape &other) const\n{\n    if (this->dims() != other.dims())\n        return false;\n\n    for (int i = 0; i < other.dims(); i++)\n    {\n        if (sz[i] != other.sz[i])\n            return false;\n    }\n\n    return true;\n}\n\ninline bool BlobShape::operator==(const BlobShape &r) const\n{\n    return this->equal(r);\n}\n\nCV_EXPORTS std::ostream &operator<< (std::ostream &stream, const BlobShape &shape);\n\n/////////////////////////////////////////////////////////////////////\n\ninline int Blob::canonicalAxis(int axis) const\n{\n    CV_Assert(-dims() <= axis && axis < dims());\n    return (axis < 0) ? axis + dims() : axis;\n}\n\ninline int Blob::dims() const\n{\n    return m.dims;\n}\n\ninline int Blob::xsize(int axis) const\n{\n    if (axis < -dims() || axis >= dims())\n        return 1;\n\n    return sizes()[(axis < 0) ? axis + dims() : axis];\n}\n\ninline int Blob::size(int axis) const\n{\n    CV_Assert(-dims() <= axis && axis < dims());\n    return sizes()[(axis < 0) ? 
axis + dims() : axis];\n}\n\ninline size_t Blob::total(int startAxis, int endAxis) const\n{\n    if (startAxis < 0)\n        startAxis += dims();\n\n    if (endAxis == INT_MAX)\n        endAxis = dims();\n    else if (endAxis < 0)\n        endAxis += dims();\n\n    CV_Assert(0 <= startAxis && startAxis <= endAxis && endAxis <= dims());\n\n    size_t size = 1; //fix: assume that slice isn't empty\n    for (int i = startAxis; i < endAxis; i++)\n        size *= (size_t)sizes()[i];\n\n    return size;\n}\n\n\ntemplate<int n>\ninline size_t Blob::offset(const Vec<int, n> &pos) const\n{\n    size_t ofs = 0;\n    int i;\n    for (i = 0; i < std::min(n, dims()); i++)\n    {\n        CV_DbgAssert(pos[i] >= 0 && pos[i] < size(i));\n        ofs = ofs * (size_t)size(i) + pos[i];\n    }\n    for (; i < dims(); i++)\n        ofs *= (size_t)size(i);\n    return ofs;\n}\n\ninline size_t Blob::offset(int n, int cn, int row, int col) const\n{\n    return offset(Vec4i(n, cn, row, col));\n}\n\ninline float *Blob::ptrf(int n, int cn, int row, int col)\n{\n    CV_Assert(type() == CV_32F);\n    return (float*)m.data + offset(n, cn, row, col);\n}\n\ninline uchar *Blob::ptr(int n, int cn, int row, int col)\n{\n    return m.data + m.elemSize() * offset(n, cn, row, col);\n}\n\ntemplate<typename TFloat>\ninline TFloat* Blob::ptr(int n, int cn, int row, int col)\n{\n    CV_Assert(type() == cv::DataDepth<TFloat>::value);\n    return (TFloat*) ptr(n, cn, row, col);\n}\n\ninline BlobShape Blob::shape() const\n{\n    return BlobShape(dims(), sizes());\n}\n\ninline bool Blob::equalShape(const Blob &other) const\n{\n    if (this->dims() != other.dims())\n        return false;\n\n    for (int i = 0; i < dims(); i++)\n    {\n        if (this->sizes()[i] != other.sizes()[i])\n            return false;\n    }\n    return true;\n}\n\ninline Mat& Blob::matRef()\n{\n    return m;\n}\n\ninline const Mat& Blob::matRefConst() const\n{\n    return m;\n}\n\ninline UMat &Blob::umatRef()\n{\n    CV_Error(Error::StsNotImplemented, \"\");\n    return *(new UMat());\n}\n\ninline const UMat &Blob::umatRefConst() const\n{\n    CV_Error(Error::StsNotImplemented, \"\");\n    return *(new UMat());\n}\n\ninline Mat Blob::getPlane(int n, int cn)\n{\n    CV_Assert(dims() > 2);\n    return Mat(dims() - 2, sizes() + 2, type(), ptr(n, cn));\n}\n\ninline int Blob::cols() const\n{\n    return xsize(3);\n}\n\ninline int Blob::rows() const\n{\n    return xsize(2);\n}\n\ninline int Blob::channels() const\n{\n    return xsize(1);\n}\n\ninline int Blob::num() const\n{\n    return xsize(0);\n}\n\ninline Size Blob::size2() const\n{\n    return Size(cols(), rows());\n}\n\ninline int Blob::type() const\n{\n    return m.depth();\n}\n\ninline const int * Blob::sizes() const\n{\n    return &m.size[0];\n}\n\n\ninline Blob &Blob::shareFrom(const Blob &blob)\n{\n    this->m = blob.m;\n    return *this;\n}\n\ninline Blob &Blob::reshape(const BlobShape &shape)\n{\n    m = m.reshape(1, shape.dims(), shape.ptr());\n    return *this;\n}\n\n}\n}\n\n#endif\n"
  },
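  {
    "path": "docs/examples/dnn_blob_offset_sketch.cpp",
    "content": "// Hypothetical sketch (not part of upstream OpenCV): blob.inl.hpp above computes\n// element offsets in row-major (C) order. This helper reproduces the same arithmetic\n// by hand for a 4-dim blob; the function name and shape values are illustrative\n// assumptions, useful only to make the indexing rule explicit.\n#include <opencv2/dnn.hpp>\n#include <cassert>\n\n// Expansion of Blob::offset() for 4 dims: ((n*C + cn)*H + row)*W + col.\nstatic size_t manualOffset(const cv::dnn::Blob &b, int n, int cn, int row, int col)\n{\n    return (((size_t)n * b.channels() + cn) * b.rows() + row) * b.cols() + col;\n}\n\nvoid offsetDemo()\n{\n    cv::dnn::Blob b(cv::dnn::BlobShape(2, 3, 4, 5));\n    // Both expressions index the same element of the underlying cv::Mat.\n    assert(manualOffset(b, 1, 2, 3, 4) == b.offset(1, 2, 3, 4));\n}\n"
  },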
  {
    "path": "src/3rdparty/opencv/include/opencv2/dnn/dict.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DNN_DNN_DICT_HPP__\n#define __OPENCV_DNN_DNN_DICT_HPP__\n\n#include <opencv2/core.hpp>\n#include <map>\n#include <ostream>\n\nnamespace cv\n{\nnamespace dnn\n{\n//! @addtogroup dnn\n//! 
@{\n\n/** @brief This struct stores the scalar value (or array) of one of the following type: double, cv::String or int64.\n *  @todo Maybe int64 is useless because double type exactly stores at least 2^52 integers.\n */\nstruct DictValue\n{\n    DictValue(const DictValue &r);\n    DictValue(int p = 0)        : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; }       //!< Constructs integer scalar\n    DictValue(unsigned p)       : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; }       //!< Constructs integer scalar\n    DictValue(double p)         : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; }     //!< Constructs floating point scalar\n    DictValue(const String &p)  : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = p; }   //!< Constructs string scalar\n\n    template<typename TypeIter>\n    static DictValue arrayInt(TypeIter begin, int size);    //!< Constructs integer array\n    template<typename TypeIter>\n    static DictValue arrayReal(TypeIter begin, int size);   //!< Constructs floating point array\n    template<typename TypeIter>\n    static DictValue arrayString(TypeIter begin, int size); //!< Constructs array of strings\n\n    template<typename T>\n    T get(int idx = -1) const; //!< Tries to convert array element with specified index to requested type and returns its.\n\n    int size() const;\n\n    bool isInt() const;\n    bool isString() const;\n    bool isReal() const;\n\n    DictValue &operator=(const DictValue &r);\n\n    friend std::ostream &operator<<(std::ostream &stream, const DictValue &dictv);\n\n    ~DictValue();\n\nprivate:\n\n    int type;\n\n    union\n    {\n        AutoBuffer<int64, 1> *pi;\n        AutoBuffer<double, 1> *pd;\n        AutoBuffer<String, 1> *ps;\n        void *p;\n    };\n\n    DictValue(int _type, void *_p) : type(_type), p(_p) {}\n    void release();\n};\n\n/** @brief This class implements name-value dictionary, values are instances of DictValue. */\nclass CV_EXPORTS Dict\n{\n    typedef std::map<String, DictValue> _Dict;\n    _Dict dict;\n\npublic:\n\n    //! Checks a presence of the @p key in the dictionary.\n    bool has(const String &key);\n\n    //! If the @p key in the dictionary then returns pointer to its value, else returns NULL.\n    DictValue *ptr(const String &key);\n\n    //! If the @p key in the dictionary then returns its value, else an error will be generated.\n    const DictValue &get(const String &key) const;\n\n    /** @overload */\n    template <typename T>\n    T get(const String &key) const;\n\n    //! If the @p key in the dictionary then returns its value, else returns @p defaultValue.\n    template <typename T>\n    T get(const String &key, const T &defaultValue) const;\n\n    //! Sets new @p value for the @p key, or adds new key-value pair into the dictionary.\n    template<typename T>\n    const T &set(const String &key, const T &value);\n\n    friend std::ostream &operator<<(std::ostream &stream, const Dict &dict);\n};\n\n//! @}\n}\n}\n\n#endif\n"
  },
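  {
    "path": "docs/examples/dnn_dict_usage_sketch.cpp",
    "content": "// Hypothetical sketch (not part of upstream OpenCV): basic use of the Dict/DictValue\n// name-value store declared in dict.hpp above. The keys model typical layer\n// parameters and are illustrative assumptions.\n#include <opencv2/dnn.hpp>\n#include <iostream>\n\nvoid dictDemo()\n{\n    cv::dnn::Dict params;\n\n    params.set(\"kernel_size\", 3);                 // stored as integer scalar\n    params.set(\"scale\", 0.5);                     // stored as real scalar\n    params.set(\"activation\", cv::String(\"relu\")); // stored as string scalar\n\n    if (params.has(\"kernel_size\"))\n        std::cout << params.get<int>(\"kernel_size\") << std::endl; // 3\n\n    // The two-argument get() falls back to the default instead of throwing.\n    int pad = params.get(\"pad\", 0);\n    std::cout << pad << std::endl; // 0: \"pad\" was never set\n}\n"
  },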
  {
    "path": "src/3rdparty/opencv/include/opencv2/dnn/dnn.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DNN_DNN_HPP__\n#define __OPENCV_DNN_DNN_HPP__\n\n#include <vector>\n#include <opencv2/core.hpp>\n#include <opencv2/dnn/dict.hpp>\n#include <opencv2/dnn/blob.hpp>\n\nnamespace cv\n{\nnamespace dnn //! This namespace is used for dnn module functionlaity.\n{\n//! @addtogroup dnn\n//! 
@{\n\n    /** @brief Initialize dnn module and built-in layers.\n     *\n     * This function automatically called on most of OpenCV builds,\n     * but you need to call it manually on some specific configurations (iOS for example).\n     */\n    CV_EXPORTS void initModule();\n\n    /** @brief This class provides all data needed to initialize layer.\n     *\n     * It includes dictionary with scalar params (which can be readed by using Dict interface),\n     * blob params #blobs and optional meta information: #name and #type of layer instance.\n    */\n    struct CV_EXPORTS LayerParams : public Dict\n    {\n        std::vector<Blob> blobs; //!< List of learned parameters stored as blobs.\n\n        String name; //!< Name of the layer instance (optional, can be used internal purposes).\n        String type; //!< Type name which was used for creating layer by layer factory (optional).\n    };\n\n    /** @brief This interface class allows to build new Layers - are building blocks of networks.\n     *\n     * Each class, derived from Layer, must implement allocate() methods to declare own outputs and forward() to compute outputs.\n     * Also before using the new layer into networks you must register your layer by using one of @ref LayerFactoryModule \"LayerFactory\" macros.\n     */\n    struct CV_EXPORTS Layer\n    {\n        //! List of learned parameters must be stored here to allow read them by using Net::getParam().\n        std::vector<Blob> blobs;\n\n        /** @brief Allocates internal buffers and output blobs with respect to the shape of inputs.\n         *  @param[in]  input  vector of already allocated input blobs\n         *  @param[out] output vector of output blobs, which must be allocated\n         *\n         * This method must create each produced blob according to shape of @p input blobs and internal layer params.\n         * If this method is called first time then @p output vector consists from empty blobs and its size determined by number of output connections.\n         * This method can be called multiple times if size of any @p input blob was changed.\n         */\n        virtual void allocate(const std::vector<Blob*> &input, std::vector<Blob> &output) = 0;\n\n        /** @brief Given the @p input blobs, computes the output @p blobs.\n         *  @param[in]  input  the input blobs.\n         *  @param[out] output allocated output blobs, which will store results of the computation.\n         */\n        virtual void forward(std::vector<Blob*> &input, std::vector<Blob> &output) = 0;\n\n        /** @brief Returns index of input blob into the input array.\n         *  @param inputName label of input blob\n         *\n         * Each layer input and output can be labeled to easily identify them using \"%<layer_name%>[.output_name]\" notation.\n         * This method maps label of input blob to its index into input vector.\n         */\n        virtual int inputNameToIndex(String inputName);\n        /** @brief Returns index of output blob in output array.\n         *  @see inputNameToIndex()\n         */\n        virtual int outputNameToIndex(String outputName);\n\n        String name; //!< Name of the layer instance, can be used for logging or other internal purposes.\n        String type; //!< Type name which was used for creating layer by layer factory.\n\n        Layer();\n        explicit Layer(const LayerParams &params); //!< Initialize only #name, #type and #blobs fields.\n        virtual ~Layer();\n    };\n\n    /** @brief This class allows to create and 
manipulate comprehensive artificial neural networks.\n     *\n     * Neural network is presented as directed acyclic graph (DAG), where vertices are Layer instances,\n     * and edges specify relationships between layers inputs and outputs.\n     *\n     * Each network layer has unique integer id and unique string name inside its network.\n     * LayerId can store either layer name or layer id.\n     *\n     * This class supports reference counting of its instances, i. e. copies point to the same instance.\n     */\n    class CV_EXPORTS Net\n    {\n    public:\n\n        Net();  //!< Default constructor.\n        ~Net(); //!< Destructor frees the net only if there aren't references to the net anymore.\n\n        /** @brief Adds new layer to the net.\n         *  @param name   unique name of the adding layer.\n         *  @param type   typename of the adding layer (type must be registered in LayerRegister).\n         *  @param params parameters which will be used to initialize the creating layer.\n         *  @returns unique identifier of created layer, or -1 if a failure will happen.\n         */\n        int addLayer(const String &name, const String &type, LayerParams &params);\n        /** @brief Adds new layer and connects its first input to the first output of previously added layer.\n         *  @see addLayer()\n         */\n        int addLayerToPrev(const String &name, const String &type, LayerParams &params);\n\n        /** @brief Converts string name of the layer to the integer identifier.\n         *  @returns id of the layer, or -1 if the layer wasn't found.\n         */\n        int getLayerId(const String &layer);\n\n        /** @brief Container for strings and integers. */\n        typedef DictValue LayerId;\n\n        /** @brief Delete layer for the network (not implemented yet) */\n        void deleteLayer(LayerId layer);\n\n        /** @brief Connects output of the first layer to input of the second layer.\n         *  @param outPin descriptor of the first layer output.\n         *  @param inpPin descriptor of the second layer input.\n         *\n         * Descriptors have the following template <DFN>&lt;layer_name&gt;[.input_number]</DFN>:\n         * - the first part of the template <DFN>layer_name</DFN> is sting name of the added layer.\n         *   If this part is empty then the network input pseudo layer will be used;\n         * - the second optional part of the template <DFN>input_number</DFN>\n         *   is either number of the layer input, either label one.\n         *   If this part is omitted then the first layer input will be used.\n         *\n         *  @see setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()\n         */\n        void connect(String outPin, String inpPin);\n        /** @brief Connects #@p outNum output of the first layer to #@p inNum input of the second layer.\n         *  @param outLayerId identifier of the first layer\n         *  @param inpLayerId identifier of the second layer\n         *  @param outNum number of the first layer output\n         *  @param inpNum number of the second layer input\n         */\n        void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);\n        /** @brief Sets ouputs names of the network input pseudo layer.\n         *\n         * Each net always has special own the network input pseudo layer with id=0.\n         * This layer stores the user blobs only and don't make any computations.\n         * In fact, this layer provides the only way to pass user data into the 
network.\n         * As any other layer, this layer can label its outputs and this function provides an easy way to do this.\n         */\n        void setNetInputs(const std::vector<String> &inputBlobNames);\n\n        /** @brief Runs forward pass for the whole network */\n        void forward();\n        /** @brief Runs forward pass to compute output of layer @p toLayer */\n        void forward(LayerId toLayer);\n        /** @brief Runs forward pass to compute output of layer @p toLayer, but computations start from @p startLayer */\n        void forward(LayerId startLayer, LayerId toLayer);\n        /** @overload */\n        void forward(const std::vector<LayerId> &startLayers, const std::vector<LayerId> &toLayers);\n\n        //TODO:\n        /** @brief Optimized forward.\n         *  @warning Not implemented yet.\n         *  @details Makes forward only those layers which weren't changed after previous forward().\n         */\n        void forwardOpt(LayerId toLayer);\n        /** @overload */\n        void forwardOpt(const std::vector<LayerId> &toLayers);\n\n        /** @brief Sets the new value for the layer output blob\n         *  @param outputName descriptor of the updating layer output blob.\n         *  @param blob new blob.\n         *  @see connect(String, String) to know format of the descriptor.\n         *  @note If updating blob is not empty then @p blob must have the same shape,\n         *  because network reshaping is not implemented yet.\n         */\n        void setBlob(String outputName, const Blob &blob);\n        /** @brief Returns the layer output blob.\n         *  @param outputName the descriptor of the returning layer output blob.\n         *  @see connect(String, String)\n         */\n        Blob getBlob(String outputName);\n\n        /** @brief Sets the new value for the learned param of the layer.\n         *  @param layer name or id of the layer.\n         *  @param numParam index of the layer parameter in the Layer::blobs array.\n         *  @param blob the new value.\n         *  @see Layer::blobs\n         *  @note If shape of the new blob differs from the previous shape,\n         *  then the following forward pass may fail.\n        */\n        void setParam(LayerId layer, int numParam, const Blob &blob);\n        /** @brief Returns parameter blob of the layer.\n         *  @param layer name or id of the layer.\n         *  @param numParam index of the layer parameter in the Layer::blobs array.\n         *  @see Layer::blobs\n         */\n        Blob getParam(LayerId layer, int numParam = 0);\n\n    private:\n\n        struct Impl;\n        Ptr<Impl> impl;\n    };\n\n    /** @brief Small interface class for loading trained serialized models of different dnn-frameworks. */\n    class Importer\n    {\n    public:\n\n        /** @brief Adds loaded layers into the @p net and sets connetions between them. 
*/\n        virtual void populateNet(Net net) = 0;\n\n        virtual ~Importer();\n    };\n\n    /** @brief Creates the importer of a <a href=\"http://caffe.berkeleyvision.org\">Caffe</a> framework network.\n     *  @param prototxt   path to the .prototxt file with the text description of the network architecture.\n     *  @param caffeModel path to the .caffemodel file with the learned network.\n     *  @returns Pointer to the created importer, NULL in failure cases.\n     */\n    CV_EXPORTS Ptr<Importer> createCaffeImporter(const String &prototxt, const String &caffeModel = String());\n\n    /** @brief Creates the importer of a <a href=\"http://torch.ch\">Torch7</a> framework network.\n     *  @param filename path to the file, dumped from Torch by using the torch.save() function.\n     *  @param isBinary specifies whether the network was serialized in ASCII or binary mode.\n     *  @returns Pointer to the created importer, NULL in failure cases.\n     *\n     *  @warning The Torch7 importer is experimental for now; you need to explicitly set the CMake opencv_dnn_BUILD_TORCH_IMPORTER flag to compile it.\n     *\n     *  @note The ASCII mode of the Torch serializer is preferable, because the binary mode extensively uses the long type of the C language,\n     *  which has different bit-lengths on different systems.\n     *\n     * The file to load must contain a serialized <a href=\"https://github.com/torch/nn/blob/master/doc/module.md\">nn.Module</a> object\n     * with the network being imported. Try to eliminate custom objects from the serialized data to avoid importing errors.\n     *\n     * List of supported layers (i.e. object instances derived from the Torch nn.Module class):\n     * - nn.Sequential\n     * - nn.Parallel\n     * - nn.Concat\n     * - nn.Linear\n     * - nn.SpatialConvolution\n     * - nn.SpatialMaxPooling, nn.SpatialAveragePooling\n     * - nn.ReLU, nn.TanH, nn.Sigmoid\n     * - nn.Reshape\n     *\n     * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.\n     */\n    CV_EXPORTS Ptr<Importer> createTorchImporter(const String &filename, bool isBinary = true);\n\n    /** @brief Loads a blob which was serialized as a torch.Tensor object of the Torch7 framework.\n     *  @warning This function has the same limitations as createTorchImporter().\n     */\n    CV_EXPORTS Blob readTorchBlob(const String &filename, bool isBinary = true);\n\n//! @}\n}\n}\n\n#include <opencv2/dnn/layer.hpp>\n#include <opencv2/dnn/dnn.inl.hpp>\n\n#endif  /* __OPENCV_DNN_DNN_HPP__ */\n"
  },
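  {
    "path": "docs/examples/dnn_net_import_sketch.cpp",
    "content": "// Hypothetical sketch (not part of upstream OpenCV): the load-and-forward pipeline\n// with the Net/Importer API declared in dnn.hpp above. The file names and the blob\n// names \"data\"/\"prob\" are assumptions -- they must match the .prototxt being loaded.\n#include <opencv2/dnn.hpp>\n#include <opencv2/imgcodecs.hpp>\n\nvoid classifyDemo()\n{\n    using namespace cv::dnn;\n\n    cv::Ptr<Importer> importer = createCaffeImporter(\"net.prototxt\", \"net.caffemodel\");\n    if (importer.empty())\n        return; // NULL is returned on failure\n\n    Net net;\n    importer->populateNet(net); // adds layers and connections to the empty net\n    importer.release();         // the importer is not needed once the net is built\n\n    // Wrap an image as a 4-dim batch blob and hand it to the input pseudo layer (id=0).\n    cv::Mat img = cv::imread(\"input.png\");\n    net.setBlob(\".data\", Blob(img));\n\n    net.forward();                   // run the whole network\n    Blob prob = net.getBlob(\"prob\"); // output blob of the layer named \"prob\"\n}\n"
  },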
  {
    "path": "src/3rdparty/opencv/include/opencv2/dnn/dnn.inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DNN_DNN_INL_HPP__\n#define __OPENCV_DNN_DNN_INL_HPP__\n\n#include <opencv2/dnn.hpp>\n\nnamespace cv\n{\nnamespace dnn\n{\n\ntemplate<typename TypeIter>\nDictValue DictValue::arrayInt(TypeIter begin, int size)\n{\n    DictValue res(Param::INT, new AutoBuffer<int64, 1>(size));\n    for (int j = 0; j < size; begin++, j++)\n        (*res.pi)[j] = *begin;\n    return res;\n}\n\ntemplate<typename TypeIter>\nDictValue DictValue::arrayReal(TypeIter begin, int size)\n{\n    DictValue res(Param::REAL, new AutoBuffer<double, 1>(size));\n    for (int j = 0; j < size; begin++, j++)\n        (*res.pd)[j] = *begin;\n    return res;\n}\n\ntemplate<typename TypeIter>\nDictValue DictValue::arrayString(TypeIter begin, int size)\n{\n    DictValue res(Param::STRING, new AutoBuffer<String, 1>(size));\n    for (int j = 0; j < size; begin++, j++)\n        (*res.ps)[j] = *begin;\n    return res;\n}\n\ntemplate<>\ninline DictValue DictValue::get<DictValue>(int idx) const\n{\n    CV_Assert(idx == -1);\n    return *this;\n}\n\ntemplate<>\ninline int64 DictValue::get<int64>(int idx) const\n{\n    CV_Assert(idx == -1 && size() == 1 || idx >= 0 && idx < size());\n    idx = (idx == -1) ? 
0 : idx;\n\n    if (type == Param::INT)\n    {\n        return (*pi)[idx];\n    }\n    else if (type == Param::REAL)\n    {\n        double doubleValue = (*pd)[idx];\n\n        double fracpart, intpart;\n        fracpart = std::modf(doubleValue, &intpart);\n        CV_Assert(fracpart == 0.0);\n\n        return (int64)doubleValue;\n    }\n    else\n    {\n        CV_Assert(isInt() || isReal());\n        return 0;\n    }\n}\n\ntemplate<>\ninline int DictValue::get<int>(int idx) const\n{\n    return (int)get<int64>(idx);\n}\n\ntemplate<>\ninline unsigned DictValue::get<unsigned>(int idx) const\n{\n    return (unsigned)get<int64>(idx);\n}\n\ntemplate<>\ninline bool DictValue::get<bool>(int idx) const\n{\n    return (get<int64>(idx) != 0);\n}\n\ntemplate<>\ninline double DictValue::get<double>(int idx) const\n{\n    CV_Assert(idx == -1 && size() == 1 || idx >= 0 && idx < size());\n    idx = (idx == -1) ? 0 : idx;\n\n    if (type == Param::REAL)\n    {\n        return (*pd)[idx];\n    }\n    else if (type == Param::INT)\n    {\n        return (double)(*pi)[idx];\n    }\n    else\n    {\n        CV_Assert(isReal() || isInt());\n        return 0;\n    }\n}\n\ntemplate<>\ninline float DictValue::get<float>(int idx) const\n{\n    return (float)get<double>(idx);\n}\n\ntemplate<>\ninline String DictValue::get<String>(int idx) const\n{\n    CV_Assert(isString());\n    CV_Assert(idx == -1 && ps->size() == 1 || idx >= 0 && idx < (int)ps->size());\n    return (*ps)[(idx == -1) ? 0 : idx];\n}\n\ninline void DictValue::release()\n{\n    switch (type)\n    {\n    case Param::INT:\n        delete pi;\n        break;\n    case Param::STRING:\n        delete ps;\n        break;\n    case Param::REAL:\n        delete pd;\n        break;\n    }\n}\n\ninline DictValue::~DictValue()\n{\n    release();\n}\n\ninline DictValue & DictValue::operator=(const DictValue &r)\n{\n    if (&r == this)\n        return *this;\n\n    if (r.type == Param::INT)\n    {\n        AutoBuffer<int64, 1> *tmp = new AutoBuffer<int64, 1>(*r.pi);\n        release();\n        pi = tmp;\n    }\n    else if (r.type == Param::STRING)\n    {\n        AutoBuffer<String, 1> *tmp = new AutoBuffer<String, 1>(*r.ps);\n        release();\n        ps = tmp;\n    }\n    else if (r.type == Param::REAL)\n    {\n        AutoBuffer<double, 1> *tmp = new AutoBuffer<double, 1>(*r.pd);\n        release();\n        pd = tmp;\n    }\n\n    type = r.type;\n\n    return *this;\n}\n\ninline DictValue::DictValue(const DictValue &r)\n{\n    type = r.type;\n\n    if (r.type == Param::INT)\n        pi = new AutoBuffer<int64, 1>(*r.pi);\n    else if (r.type == Param::STRING)\n        ps = new AutoBuffer<String, 1>(*r.ps);\n    else if (r.type == Param::REAL)\n        pd = new AutoBuffer<double, 1>(*r.pd);\n}\n\ninline bool DictValue::isString() const\n{\n    return (type == Param::STRING);\n}\n\ninline bool DictValue::isInt() const\n{\n    return (type == Param::INT);\n}\n\ninline bool DictValue::isReal() const\n{\n    return (type == Param::REAL || type == Param::INT);\n}\n\ninline int DictValue::size() const\n{\n    switch (type)\n    {\n    case Param::INT:\n        return (int)pi->size();\n        break;\n    case Param::STRING:\n        return (int)ps->size();\n        break;\n    case Param::REAL:\n        return (int)pd->size();\n        break;\n    default:\n        CV_Error(Error::StsInternal, \"\");\n        return -1;\n    }\n}\n\ninline std::ostream &operator<<(std::ostream &stream, const DictValue &dictv)\n{\n    int i;\n\n    if (dictv.isInt())\n    {\n     
   for (i = 0; i < dictv.size() - 1; i++)\n            stream << dictv.get<int64>(i) << \", \";\n        stream << dictv.get<int64>(i);\n    }\n    else if (dictv.isReal())\n    {\n        for (i = 0; i < dictv.size() - 1; i++)\n            stream << dictv.get<double>(i) << \", \";\n        stream << dictv.get<double>(i);\n    }\n    else if (dictv.isString())\n    {\n        for (i = 0; i < dictv.size() - 1; i++)\n            stream << \"\\\"\" << dictv.get<String>(i) << \"\\\", \";\n        stream << dictv.get<String>(i);\n    }\n\n    return stream;\n}\n\n/////////////////////////////////////////////////////////////////\n\ninline bool Dict::has(const String &key)\n{\n    return dict.count(key) != 0;\n}\n\ninline DictValue *Dict::ptr(const String &key)\n{\n    _Dict::iterator i = dict.find(key);\n    return (i == dict.end()) ? NULL : &i->second;\n}\n\ninline const DictValue &Dict::get(const String &key) const\n{\n    _Dict::const_iterator i = dict.find(key);\n    if (i == dict.end())\n        CV_Error(Error::StsObjectNotFound, \"Required argument \\\"\" + key + \"\\\" not found into dictionary\");\n    return i->second;\n}\n\ntemplate <typename T>\ninline T Dict::get(const String &key) const\n{\n    return this->get(key).get<T>();\n}\n\ntemplate <typename T>\ninline T Dict::get(const String &key, const T &defaultValue) const\n{\n    _Dict::const_iterator i = dict.find(key);\n\n    if (i != dict.end())\n        return i->second.get<T>();\n    else\n        return defaultValue;\n}\n\ntemplate<typename T>\ninline const T &Dict::set(const String &key, const T &value)\n{\n    _Dict::iterator i = dict.find(key);\n\n    if (i != dict.end())\n        i->second = DictValue(value);\n    else\n        dict.insert(std::make_pair(key, DictValue(value)));\n\n    return value;\n}\n\ninline std::ostream &operator<<(std::ostream &stream, const Dict &dict)\n{\n    Dict::_Dict::const_iterator it;\n    for (it = dict.dict.begin(); it != dict.dict.end(); it++)\n        stream << it->first << \" : \" << it->second << \"\\n\";\n\n    return stream;\n}\n\n}\n}\n\n#endif\n"
  },
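  {
    "path": "docs/examples/dnn_dictvalue_sketch.cpp",
    "content": "// Hypothetical sketch (not part of upstream OpenCV): DictValue stores scalars or\n// arrays and converts on read, as implemented in dnn.inl.hpp above. The values are\n// illustrative assumptions.\n#include <opencv2/dnn.hpp>\n#include <iostream>\n\nvoid dictValueDemo()\n{\n    using cv::dnn::DictValue;\n\n    int sizes[] = { 3, 5, 7 };\n    DictValue kernels = DictValue::arrayInt(sizes, 3); // plain pointers work as iterators\n\n    std::cout << kernels.size()      << std::endl; // 3\n    std::cout << kernels.get<int>(1) << std::endl; // 5\n\n    DictValue d(2.0);\n    // A real value converts to an integer only when its fractional part is zero;\n    // get<int64>() asserts on values like 2.5.\n    std::cout << d.get<int>() << std::endl;                   // 2\n    std::cout << d.isReal() << \" \" << d.isInt() << std::endl; // 1 0\n}\n"
  },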
  {
    "path": "src/3rdparty/opencv/include/opencv2/dnn/layer.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DNN_LAYER_HPP__\n#define __OPENCV_DNN_LAYER_HPP__\n#include <opencv2/dnn.hpp>\n\nnamespace cv\n{\nnamespace dnn\n{\n//! @addtogroup dnn\n//! @{\n//!\n//! @defgroup LayerFactoryModule Utilities for new layers registration\n//! @{\n\n/** @brief %Layer factory allows to create instances of registered layers. */\nclass CV_EXPORTS LayerFactory\n{\npublic:\n\n    //! Each Layer class must provide this function to the factory\n    typedef Ptr<Layer>(*Constuctor)(LayerParams &params);\n\n    //! Registers the layer class with typename @p type and specified @p constructor.\n    static void registerLayer(const String &type, Constuctor constructor);\n\n    //! 
Unregisters registered layer with specified type name.\n    static void unregisterLayer(const String &type);\n\n    /** @brief Creates instance of registered layer.\n     *  @param type type name of creating layer.\n     *  @param params parameters which will be used for layer initialization.\n     */\n    static Ptr<Layer> createLayerInstance(const String &type, LayerParams& params);\n\nprivate:\n    LayerFactory();\n\n    struct Impl;\n    static Ptr<Impl> impl();\n};\n\n/** @brief Registers layer constructor in runtime.\n*   @param type string, containing type name of the layer.\n*   @param constuctorFunc pointer to the function of type LayerRegister::Constuctor, which creates the layer.\n*   @details This macros must be placed inside the function code.\n*/\n#define REG_RUNTIME_LAYER_FUNC(type, constuctorFunc) \\\n    LayerFactory::registerLayer(#type, constuctorFunc);\n\n/** @brief Registers layer class in runtime.\n *  @param type string, containing type name of the layer.\n *  @param class C++ class, derived from Layer.\n *  @details This macros must be placed inside the function code.\n */\n#define REG_RUNTIME_LAYER_CLASS(type, class) \\\n    LayerFactory::registerLayer(#type, _layerDynamicRegisterer<class>);\n\n/** @brief Registers layer constructor on module load time.\n*   @param type string, containing type name of the layer.\n*   @param constuctorFunc pointer to the function of type LayerRegister::Constuctor, which creates the layer.\n*   @details This macros must be placed outside the function code.\n*/\n#define REG_STATIC_LAYER_FUNC(type, constuctorFunc) \\\nstatic _LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, constuctorFunc);\n\n/** @brief Registers layer class on module load time.\n *  @param type string, containing type name of the layer.\n *  @param class C++ class, derived from Layer.\n *  @details This macros must be placed outside the function code.\n */\n#define REG_STATIC_LAYER_CLASS(type, class)                         \\\nPtr<Layer> __LayerStaticRegisterer_func_##type(LayerParams &params) \\\n    { return Ptr<Layer>(new class(params)); }                       \\\nstatic _LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, __LayerStaticRegisterer_func_##type);\n\n\n//! @}\n//! @}\n\n\ntemplate<typename LayerClass>\nPtr<Layer> _layerDynamicRegisterer(LayerParams &params)\n{\n    return Ptr<Layer>(new LayerClass(params));\n}\n\n//allows automatically register created layer on module load time\nstruct _LayerStaticRegisterer\n{\n    String type;\n\n    _LayerStaticRegisterer(const String &type, LayerFactory::Constuctor constuctor)\n    {\n        this->type = type;\n        LayerFactory::registerLayer(type, constuctor);\n    }\n\n    ~_LayerStaticRegisterer()\n    {\n        LayerFactory::unregisterLayer(type);\n    }\n};\n\n}\n}\n#endif\n"
  },
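  {
    "path": "docs/examples/dnn_custom_layer_sketch.cpp",
    "content": "// Hypothetical sketch (not part of upstream OpenCV): a minimal pass-through layer\n// and its registration through the LayerFactory macros declared in layer.hpp above.\n// The type name \"MyIdentity\" and this file are illustrative assumptions.\n#include <opencv2/dnn.hpp>\n\nclass MyIdentityLayer : public cv::dnn::Layer\n{\npublic:\n    MyIdentityLayer(cv::dnn::LayerParams &params) : cv::dnn::Layer(params) {}\n\n    // Give each output blob the shape and type of the matching input.\n    void allocate(const std::vector<cv::dnn::Blob*> &input, std::vector<cv::dnn::Blob> &output)\n    {\n        output.resize(input.size());\n        for (size_t i = 0; i < input.size(); i++)\n            output[i].create(input[i]->shape(), input[i]->type());\n    }\n\n    // Copy each input to the corresponding output unchanged.\n    void forward(std::vector<cv::dnn::Blob*> &input, std::vector<cv::dnn::Blob> &output)\n    {\n        for (size_t i = 0; i < input.size(); i++)\n            input[i]->matRefConst().copyTo(output[i].matRef());\n    }\n};\n\nvoid registerMyLayers()\n{\n    using namespace cv::dnn;\n    // Runtime registration; must run before a net referencing \"MyIdentity\" is built.\n    REG_RUNTIME_LAYER_CLASS(MyIdentity, MyIdentityLayer)\n}\n"
  },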
  {
    "path": "src/3rdparty/opencv/include/opencv2/dnn.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DNN_HPP__\n#define __OPENCV_DNN_HPP__\n\n// This is an umbrealla header to include into you project.\n// We are free to change headers layout in dnn subfolder, so please include\n// this header for future compartibility\n\n\n/** @defgroup dnn Deep Neural Network module\n  @{\n    This module contains:\n        - API for new layers creation, layers are building bricks of neural networks;\n        - set of built-in most-useful Layers;\n        - API to constuct and modify comprehensive neural networks from layers;\n        - functionality for loading serialized networks models from differnet frameworks.\n\n    Functionality of this module is designed only for forward pass computations (i. e. network testing).\n    A network training is in principle not supported.\n  @}\n*/\n#include <opencv2/dnn/dnn.hpp>\n\n#endif /* __OPENCV_DNN_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/dpm.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Itseez Inc or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n// Implementation authors:\n// Jiaolong Xu - jiaolongxu@gmail.com\n// Evgeniy Kozinov - evgeniy.kozinov@gmail.com\n// Valentina Kustikova - valentina.kustikova@gmail.com\n// Nikolai Zolotykh - Nikolai.Zolotykh@gmail.com\n// Iosif Meyerov - meerov@vmk.unn.ru\n// Alexey Polovinkin - polovinkin.alexey@gmail.com\n//\n//M*/\n\n#ifndef __OPENCV_LATENTSVM_HPP__\n#define __OPENCV_LATENTSVM_HPP__\n\n#include \"opencv2/core.hpp\"\n\n#include <map>\n#include <vector>\n#include <string>\n\n/** @defgroup dpm Deformable Part-based Models\n\nDiscriminatively Trained Part Based Models for Object Detection\n---------------------------------------------------------------\n\nThe object detector described below has been initially proposed by P.F. Felzenszwalb in\n@cite Felzenszwalb2010a . It is based on a Dalal-Triggs detector that uses a single filter on histogram\nof oriented gradients (HOG) features to represent an object category. This detector uses a sliding\nwindow approach, where a filter is applied at all positions and scales of an image. The first\ninnovation is enriching the Dalal-Triggs model using a star-structured part-based model defined by a\n\"root\" filter (analogous to the Dalal-Triggs filter) plus a set of parts filters and associated\ndeformation models. 
The score of one of the star models at a particular position and scale within an\nimage is the score of the root filter at the given location plus the sum over parts of the maximum,\nover placements of that part, of the part filter score on its location minus a deformation cost\nmeasuring the deviation of the part from its ideal location relative to the root. Both root and part\nfilter scores are defined by the dot product between a filter (a set of weights) and a subwindow of\na feature pyramid computed from the input image. Another improvement is a representation of the\nclass of models by a mixture of star models. The score of a mixture model at a particular position\nand scale is the maximum over components, of the score of that component model at the given\nlocation.\n\nThe detector was dramatically sped up with the cascade algorithm proposed by P.F. Felzenszwalb in\n@cite Felzenszwalb2010b . The algorithm prunes partial hypotheses using thresholds on their scores. The\nbasic idea of the algorithm is to use a hierarchy of models defined by an ordering of the original\nmodel's parts. For a model with (n+1) parts, including the root, a sequence of (n+1) models is\nobtained. The i-th model in this sequence is defined by the first i parts from the original model.\nUsing this hierarchy, low scoring hypotheses can be pruned after looking at the best configuration\nof a subset of the parts. Hypotheses that score high under a weak model are evaluated further using\na richer model.\n\nOpenCV provides a C++ implementation of the DPM cascade detector.\n\n*/\n\nnamespace cv\n{\n\nnamespace dpm\n{\n\n/** @brief This is an abstract C++ class; it provides the external user API for working with DPM.\n */\nclass CV_EXPORTS_W DPMDetector\n{\npublic:\n\n    struct CV_EXPORTS_W ObjectDetection\n    {\n        ObjectDetection();\n        ObjectDetection( const Rect& rect, float score, int classID=-1 );\n        Rect rect;\n        float score;\n        int classID;\n    };\n\n    virtual bool isEmpty() const = 0;\n\n    /** @brief Find rectangular regions in the given image that are likely to contain objects of loaded classes\n    (models) and corresponding confidence levels.\n    @param image An image.\n    @param objects The detections: rectangles, scores and class IDs.\n    */\n    virtual void detect(cv::Mat &image, CV_OUT std::vector<ObjectDetection> &objects) = 0;\n\n    /** @brief Return the class (model) names that were passed to the constructor or the load method, or extracted from\n    the model filenames in those methods.\n     */\n    virtual std::vector<std::string> const& getClassNames() const = 0;\n\n    /** @brief Return a count of loaded models (classes).\n     */\n    virtual size_t getClassCount() const = 0;\n\n    /** @brief Load the trained models from given .xml files and return cv::Ptr\\<DPMDetector\\>.\n    @param filenames A set of filenames storing the trained detectors (models). Each file contains one\n    model. See examples of such files here `/opencv_extra/testdata/cv/dpm/VOC2007_Cascade/`.\n    @param classNames A set of trained model names. If it's empty then the name of each model will be\n    constructed from the name of the file containing the model. E.g. the model stored in\n    \"/home/user/cat.xml\" will get the name \"cat\".\n     */\n    static cv::Ptr<DPMDetector> create(std::vector<std::string> const &filenames,\n            std::vector<std::string> const &classNames = std::vector<std::string>());\n\n    virtual ~DPMDetector(){}\n};\n\n} // namespace dpm\n} // namespace cv\n\n#endif\n"
  },
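  {
    "path": "samples/dpm/dpm_cascade_example.cpp",
    "content": "// Hypothetical usage sketch for the cv::dpm::DPMDetector API declared above;\n// the include path, model file name and image file name are assumptions and\n// not part of the original sources.\n#include <vector>\n#include <string>\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/imgproc.hpp>\n#include \"opencv2/latentsvm.hpp\" // header declaring cv::dpm::DPMDetector (path assumed)\n\nint main()\n{\n    // One trained cascade model per .xml file; \"inriaperson.xml\" is a placeholder.\n    std::vector<std::string> files(1, \"inriaperson.xml\");\n    cv::Ptr<cv::dpm::DPMDetector> detector = cv::dpm::DPMDetector::create(files);\n    if (detector->isEmpty())\n        return 1;\n\n    cv::Mat image = cv::imread(\"pedestrians.jpg\"); // placeholder image\n    if (image.empty())\n        return 1;\n\n    // detect() takes a non-const image and reports rectangles, scores and class IDs.\n    std::vector<cv::dpm::DPMDetector::ObjectDetection> detections;\n    detector->detect(image, detections);\n\n    for (size_t i = 0; i < detections.size(); i++)\n        cv::rectangle(image, detections[i].rect, cv::Scalar(0, 255, 0), 2);\n    return 0;\n}\n"
  },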
  {
    "path": "src/3rdparty/opencv/include/opencv2/face/facerec.hpp",
    "content": "// This file is part of OpenCV project.\n// It is subject to the license terms in the LICENSE file found in the top-level directory\n// of this distribution and at http://opencv.org/license.html.\n\n// Copyright (c) 2011,2012. Philipp Wagner <bytefish[at]gmx[dot]de>.\n// Third party copyrights are property of their respective owners.\n\n#ifndef __OPENCV_FACEREC_HPP__\n#define __OPENCV_FACEREC_HPP__\n\n#include \"opencv2/face.hpp\"\n#include \"opencv2/core.hpp\"\n\nnamespace cv { namespace face {\n\n//! @addtogroup face\n//! @{\n\n// base for two classes\nclass CV_EXPORTS_W BasicFaceRecognizer : public FaceRecognizer\n{\npublic:\n    /** @see setNumComponents */\n    CV_WRAP virtual int getNumComponents() const = 0;\n    /** @copybrief getNumComponents @see getNumComponents */\n    CV_WRAP virtual void setNumComponents(int val) = 0;\n    /** @see setThreshold */\n    CV_WRAP virtual double getThreshold() const = 0;\n    /** @copybrief getThreshold @see getThreshold */\n    CV_WRAP virtual void setThreshold(double val) = 0;\n    CV_WRAP virtual std::vector<cv::Mat> getProjections() const = 0;\n    CV_WRAP virtual cv::Mat getLabels() const = 0;\n    CV_WRAP virtual cv::Mat getEigenValues() const = 0;\n    CV_WRAP virtual cv::Mat getEigenVectors() const = 0;\n    CV_WRAP virtual cv::Mat getMean() const = 0;\n};\n\n/**\n@param num_components The number of components (read: Eigenfaces) kept for this Principal\nComponent Analysis. As a hint: There's no rule how many components (read: Eigenfaces) should be\nkept for good reconstruction capabilities. It is based on your input data, so experiment with the\nnumber. Keeping 80 components should almost always be sufficient.\n@param threshold The threshold applied in the prediction.\n\n### Notes:\n\n-   Training and prediction must be done on grayscale images, use cvtColor to convert between the\n    color spaces.\n-   **THE EIGENFACES METHOD MAKES THE ASSUMPTION, THAT THE TRAINING AND TEST IMAGES ARE OF EQUAL\n    SIZE.** (caps-lock, because I got so many mails asking for this). You have to make sure your\n    input data has the correct shape, else a meaningful exception is thrown. Use resize to resize\n    the images.\n-   This model does not support updating.\n\n### Model internal data:\n\n-   num_components see createEigenFaceRecognizer.\n-   threshold see createEigenFaceRecognizer.\n-   eigenvalues The eigenvalues for this Principal Component Analysis (ordered descending).\n-   eigenvectors The eigenvectors for this Principal Component Analysis (ordered by their\n    eigenvalue).\n-   mean The sample mean calculated from the training data.\n-   projections The projections of the training data.\n-   labels The threshold applied in the prediction. If the distance to the nearest neighbor is\n    larger than the threshold, this method returns -1.\n */\nCV_EXPORTS_W Ptr<BasicFaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);\n\n/**\n@param num_components The number of components (read: Fisherfaces) kept for this Linear\nDiscriminant Analysis with the Fisherfaces criterion. It's useful to keep all components, that\nmeans the number of your classes c (read: subjects, persons you want to recognize). If you leave\nthis at the default (0) or set it to a value less-equal 0 or greater (c-1), it will be set to the\ncorrect number (c-1) automatically.\n@param threshold The threshold applied in the prediction. 
If the distance to the nearest neighbor\nis larger than the threshold, this method returns -1.\n\n### Notes:\n\n-   Training and prediction must be done on grayscale images, use cvtColor to convert between the\n    color spaces.\n-   **THE FISHERFACES METHOD MAKES THE ASSUMPTION THAT THE TRAINING AND TEST IMAGES ARE OF EQUAL\n    SIZE.** (caps-lock, because I got so many mails asking for this). You have to make sure your\n    input data has the correct shape, else a meaningful exception is thrown. Use resize to resize\n    the images.\n-   This model does not support updating.\n\n### Model internal data:\n\n-   num_components see createFisherFaceRecognizer.\n-   threshold see createFisherFaceRecognizer.\n-   eigenvalues The eigenvalues for this Linear Discriminant Analysis (ordered descending).\n-   eigenvectors The eigenvectors for this Linear Discriminant Analysis (ordered by their\n    eigenvalue).\n-   mean The sample mean calculated from the training data.\n-   projections The projections of the training data.\n-   labels The labels corresponding to the projections.\n */\nCV_EXPORTS_W Ptr<BasicFaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);\n\nclass CV_EXPORTS_W LBPHFaceRecognizer : public FaceRecognizer\n{\npublic:\n    /** @see setGridX */\n    CV_WRAP virtual int getGridX() const = 0;\n    /** @copybrief getGridX @see getGridX */\n    CV_WRAP virtual void setGridX(int val) = 0;\n    /** @see setGridY */\n    CV_WRAP virtual int getGridY() const = 0;\n    /** @copybrief getGridY @see getGridY */\n    CV_WRAP virtual void setGridY(int val) = 0;\n    /** @see setRadius */\n    CV_WRAP virtual int getRadius() const = 0;\n    /** @copybrief getRadius @see getRadius */\n    CV_WRAP virtual void setRadius(int val) = 0;\n    /** @see setNeighbors */\n    CV_WRAP virtual int getNeighbors() const = 0;\n    /** @copybrief getNeighbors @see getNeighbors */\n    CV_WRAP virtual void setNeighbors(int val) = 0;\n    /** @see setThreshold */\n    CV_WRAP virtual double getThreshold() const = 0;\n    /** @copybrief getThreshold @see getThreshold */\n    CV_WRAP virtual void setThreshold(double val) = 0;\n    CV_WRAP virtual std::vector<cv::Mat> getHistograms() const = 0;\n    CV_WRAP virtual cv::Mat getLabels() const = 0;\n};\n\n/**\n@param radius The radius used for building the Circular Local Binary Pattern. The greater the\nradius, the larger the image neighborhood each pattern encodes.\n@param neighbors The number of sample points to build a Circular Local Binary Pattern from. An\nappropriate value is `8` sample points. Keep in mind: the more sample points you include,\nthe higher the computational cost.\n@param grid_x The number of cells in the horizontal direction, 8 is a common value used in\npublications. The more cells, the finer the grid, the higher the dimensionality of the resulting\nfeature vector.\n@param grid_y The number of cells in the vertical direction, 8 is a common value used in\npublications. The more cells, the finer the grid, the higher the dimensionality of the resulting\nfeature vector.\n@param threshold The threshold applied in the prediction. 
If the distance to the nearest neighbor\nis larger than the threshold, this method returns -1.\n\n### Notes:\n\n-   The Circular Local Binary Patterns (used in training and prediction) expect the data to be given as\n    grayscale images, use cvtColor to convert between the color spaces.\n-   This model supports updating.\n\n### Model internal data:\n\n-   radius see createLBPHFaceRecognizer.\n-   neighbors see createLBPHFaceRecognizer.\n-   grid_x see createLBPHFaceRecognizer.\n-   grid_y see createLBPHFaceRecognizer.\n-   threshold see createLBPHFaceRecognizer.\n-   histograms Local Binary Patterns Histograms calculated from the given training data (empty if\n    none was given).\n-   labels Labels corresponding to the calculated Local Binary Patterns Histograms.\n */\nCV_EXPORTS_W Ptr<LBPHFaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8, int grid_x=8, int grid_y=8, double threshold = DBL_MAX);\n\n//! @}\n\n}} //namespace cv::face\n\n#endif //__OPENCV_FACEREC_HPP__\n"
  },
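  {
    "path": "samples/face/facerec_factories_example.cpp",
    "content": "// Hypothetical sketch for the factory functions declared in\n// opencv2/face/facerec.hpp; the synthetic matrices stand in for real,\n// equally sized grayscale face images. File name and values are assumptions.\n#include <vector>\n#include <opencv2/core.hpp>\n#include \"opencv2/face.hpp\"\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::face;\n\n    // Two fake \"faces\" of equal size (Eigen/Fisherfaces require equal sizes).\n    std::vector<Mat> images;\n    std::vector<int> labels;\n    images.push_back(Mat::zeros(64, 64, CV_8UC1));       labels.push_back(0);\n    images.push_back(Mat(64, 64, CV_8UC1, Scalar(200))); labels.push_back(1);\n\n    // Keep 10 Eigenfaces and reject predictions with distance above 123.0.\n    Ptr<BasicFaceRecognizer> eigen = createEigenFaceRecognizer(10, 123.0);\n    eigen->train(images, labels);\n\n    // LBPH is the only model here that supports update().\n    Ptr<LBPHFaceRecognizer> lbph = createLBPHFaceRecognizer(1, 8, 8, 8);\n    lbph->train(images, labels);\n    lbph->update(images, labels); // extends the model without discarding it\n\n    int predicted = lbph->predict(images[0]); // expected: label 0\n    (void)predicted;\n    return 0;\n}\n"
  },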
  {
    "path": "src/3rdparty/opencv/include/opencv2/face/predict_collector.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this license.\nIf you do not agree to this license, do not download, install,\ncopy or use the software.\n\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2000-2015, Intel Corporation, all rights reserved.\nCopyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\nCopyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.\nCopyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.\nCopyright (C) 2015, OpenCV Foundation, all rights reserved.\nCopyright (C) 2015, Itseez Inc., all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are disclaimed.\nIn no event shall copyright holders or contributors be liable for any direct,\nindirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_PREDICT_COLLECTOR_HPP__\n#define __OPENCV_PREDICT_COLLECTOR_HPP__\n#include <cfloat>\n#include \"opencv2/core/cvdef.h\"\n#include \"opencv2/core/cvstd.hpp\"\nnamespace cv {\nnamespace face {\n//! @addtogroup face\n//! 
@{\n/** @brief Abstract base class for all strategies of prediction result handling\n*/\nclass CV_EXPORTS_W PredictCollector {\nprotected:\n    double _threshhold;\n    int _size;\n    int _state;\npublic:\n    /** @brief Creates a new predict collector with the given threshold */\n    PredictCollector(double threshhold = DBL_MAX) :_threshhold(threshhold) {};\n    CV_WRAP virtual ~PredictCollector() {}\n    /** @brief Called once at the start of recognition\n    @param size total size of the prediction evaluation that the recognizer could perform\n    @param state optional user-defined value, passed back unchanged, to allow multi-threaded, multi-session or aggregation scenarios\n    */\n    CV_WRAP virtual void init(const int size, const int state = 0);\n    /** @brief Called with every recognition result\n    @param label current prediction label\n    @param dist current prediction distance (confidence)\n    @param state optional user-defined value, passed back unchanged, to allow multi-threaded, multi-session or aggregation scenarios\n    @return true if the recognizer should continue prediction, false if it should terminate\n    */\n    CV_WRAP virtual bool emit(const int label, const double dist, const int state = 0); // not abstract because Python wrapper generation requires a non-abstract class\n};\n\n/** @brief Default predict collector that tracks the minimal distance with threshold checking (the default behavior for most prediction logic)\n*/\nclass CV_EXPORTS_W MinDistancePredictCollector : public PredictCollector {\nprivate:\n    int _label;\n    double _dist;\npublic:\n    /** @brief Creates a new MinDistancePredictCollector with the given threshold */\n    CV_WRAP MinDistancePredictCollector(double threshhold = DBL_MAX) : PredictCollector(threshhold) {\n        _label = 0;\n        _dist = DBL_MAX;\n    };\n    CV_WRAP bool emit(const int label, const double dist, const int state = 0);\n    /** @brief Result label, 0 if not found */\n    CV_WRAP int getLabel() const;\n    /** @brief Result distance (confidence), DBL_MAX if not found */\n    CV_WRAP double getDist() const;\n    /** @brief Factory method creating a cv::Ptr to a MinDistancePredictCollector */\n    CV_WRAP static Ptr<MinDistancePredictCollector> create(double threshold = DBL_MAX);\n};\n//! @}\n}\n}\n#endif"
  },
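  {
    "path": "samples/face/predict_collector_example.cpp",
    "content": "// Hypothetical sketch for the PredictCollector API declared in\n// opencv2/face/predict_collector.hpp; the training data is synthetic and the\n// file name is an assumption.\n#include <vector>\n#include <opencv2/core.hpp>\n#include \"opencv2/face.hpp\"\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::face;\n\n    // Labels start at 1 because MinDistancePredictCollector reports 0 for \"not found\".\n    std::vector<Mat> images;\n    std::vector<int> labels;\n    images.push_back(Mat::zeros(64, 64, CV_8UC1));       labels.push_back(1);\n    images.push_back(Mat(64, 64, CV_8UC1, Scalar(255))); labels.push_back(2);\n\n    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();\n    model->train(images, labels);\n\n    // Instead of plain predict(), pass a collector: the recognizer forwards\n    // every (label, dist) pair to it via emit().\n    Ptr<MinDistancePredictCollector> collector = MinDistancePredictCollector::create();\n    model->predict(images[0], collector);\n\n    int label   = collector->getLabel(); // 0 if nothing passed the threshold\n    double dist = collector->getDist();  // DBL_MAX if nothing passed the threshold\n    (void)label; (void)dist;\n    return 0;\n}\n"
  },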
  {
    "path": "src/3rdparty/opencv/include/opencv2/face.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_FACE_HPP__\n#define __OPENCV_FACE_HPP__\n\n/**\n@defgroup face Face Recognition\n\n- @ref face_changelog\n- @ref tutorial_face_main\n\n*/\n\n#include \"opencv2/core.hpp\"\n#include \"face/predict_collector.hpp\"\n#include <map>\n\nnamespace cv { namespace face {\n\n//! @addtogroup face\n//! @{\n\n/** @brief Abstract base class for all face recognition models\n\nAll face recognition models in OpenCV are derived from the abstract base class FaceRecognizer, which\nprovides a unified access to all face recongition algorithms in OpenCV.\n\n### Description\n\nI'll go a bit more into detail explaining FaceRecognizer, because it doesn't look like a powerful\ninterface at first sight. But: Every FaceRecognizer is an Algorithm, so you can easily get/set all\nmodel internals (if allowed by the implementation). Algorithm is a relatively new OpenCV concept,\nwhich is available since the 2.4 release. I suggest you take a look at its description.\n\nAlgorithm provides the following features for all derived classes:\n\n-   So called “virtual constructor”. That is, each Algorithm derivative is registered at program\n    start and you can get the list of registered algorithms and create instance of a particular\n    algorithm by its name (see Algorithm::create). If you plan to add your own algorithms, it is\n    good practice to add a unique prefix to your algorithms to distinguish them from other\n    algorithms.\n-   Setting/Retrieving algorithm parameters by name. 
If you have used the video capturing functionality from\n    the OpenCV highgui module, you are probably familiar with cvSetCaptureProperty,\n    cvGetCaptureProperty, VideoCapture::set and VideoCapture::get. Algorithm provides similar\n    methods where, instead of integer ids, you specify the parameter names as text Strings. See\n    Algorithm::set and Algorithm::get for details.\n-   Reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store\n    all its parameters and then read them back. There is no need to re-implement it each time.\n\nMoreover, every FaceRecognizer supports the following:\n\n-   **Training** of a FaceRecognizer with FaceRecognizer::train on a given set of images (your face\n    database!).\n-   **Prediction** of a given sample image, that means a face. The image is given as a Mat.\n-   **Loading/Saving** the model state from/to a given XML or YAML file.\n-   **Setting/Getting labels info**, that is stored as a string. String labels info is useful for\n    keeping names of the recognized people.\n\n@note When using the FaceRecognizer interface in combination with Python, please stick to Python 2.\nSome underlying scripts like create_csv will not work in other versions, like Python 3.\n\n### Setting the Thresholds\n\nSometimes you run into the situation when you want to apply a threshold on the prediction. A common\nscenario in face recognition is to tell whether a face belongs to the training dataset or if it is\nunknown. You might wonder why there's no public API in FaceRecognizer to set the threshold for the\nprediction, but rest assured: it's supported. It just means there's no generic way in an abstract\nclass to provide an interface for setting/getting the thresholds of *every possible* FaceRecognizer\nalgorithm. The appropriate place to set the thresholds is in the constructor of the specific\nFaceRecognizer and, since every FaceRecognizer is an Algorithm (see above), you can get/set the\nthresholds at runtime!\n\nHere is an example of setting a threshold for the Eigenfaces method when creating the model:\n\n@code\n// Let's say we want to keep 10 Eigenfaces and have a threshold value of 10.0\nint num_components = 10;\ndouble threshold = 10.0;\n// Then if you want to have a cv::FaceRecognizer with a confidence threshold,\n// create the concrete implementation with the appropriate parameters:\nPtr<FaceRecognizer> model = createEigenFaceRecognizer(num_components, threshold);\n@endcode\n\nSometimes it's impossible to train the model just to experiment with threshold values. Thanks to\nAlgorithm it's possible to set internal model thresholds during runtime. Let's see how we would\nset/get the prediction threshold for the Eigenfaces model we've created above:\n\n@code\n// The following line reads the threshold from the Eigenfaces model:\ndouble current_threshold = model->getDouble(\"threshold\");\n// And this line sets the threshold to 0.0:\nmodel->set(\"threshold\", 0.0);\n@endcode\n\nIf you've set the threshold to 0.0 as we did above, then:\n\n@code\n//\nMat img = imread(\"person1/3.jpg\", CV_LOAD_IMAGE_GRAYSCALE);\n// Get a prediction from the model. 
Note: We've set a threshold of 0.0 above,\n// since the distance is almost always larger than 0.0, you'll get -1 as\n// label, which indicates this face is unknown\nint predicted_label = model->predict(img);\n// ...\n@endcode\n\nis going to yield -1 as predicted label, which states this face is unknown.\n\n### Getting the name of a FaceRecognizer\n\nSince every FaceRecognizer is an Algorithm, you can use Algorithm::name to get the name of a\nFaceRecognizer:\n\n@code\n// Create a FaceRecognizer:\nPtr<FaceRecognizer> model = createEigenFaceRecognizer();\n// And here's how to get its name:\nString name = model->name();\n@endcode\n\n */\nclass CV_EXPORTS_W FaceRecognizer : public Algorithm\n{\npublic:\n    /** @brief Trains a FaceRecognizer with given data and associated labels.\n\n    @param src The training images, that means the faces you want to learn. The data has to be\n    given as a vector\\<Mat\\>.\n    @param labels The labels corresponding to the images have to be given either as a vector\\<int\\>\n    or a Mat of type CV_32SC1.\n\n    The following source code snippet shows you how to learn a Fisherfaces model on a given set of\n    images. The images are read with imread and pushed into a std::vector\\<Mat\\>. The labels of each\n    image are stored within a std::vector\\<int\\> (you could also use a Mat of type CV_32SC1). Think of\n    the label as the subject (the person) this image belongs to, so same subjects (persons) should have\n    the same label. For the available FaceRecognizer you don't have to pay any attention to the order of\n    the labels, just make sure same persons have the same label:\n\n    @code\n    // holds images and labels\n    vector<Mat> images;\n    vector<int> labels;\n    // images for first person\n    images.push_back(imread(\"person0/0.jpg\", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);\n    images.push_back(imread(\"person0/1.jpg\", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);\n    images.push_back(imread(\"person0/2.jpg\", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);\n    // images for second person\n    images.push_back(imread(\"person1/0.jpg\", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);\n    images.push_back(imread(\"person1/1.jpg\", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);\n    images.push_back(imread(\"person1/2.jpg\", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);\n    @endcode\n\n    Now that we have read some images, we can create a new FaceRecognizer. In this example I'll create\n    a Fisherfaces model and decide to keep all of the possible Fisherfaces:\n\n    @code\n    // Create a new Fisherfaces model and retain all available Fisherfaces,\n    // this is the most common usage of this specific FaceRecognizer:\n    //\n    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();\n    @endcode\n\n    And finally train it on the given dataset (the face images and labels):\n\n    @code\n    // This is the common interface to train all of the available cv::FaceRecognizer\n    // implementations:\n    //\n    model->train(images, labels);\n    @endcode\n     */\n    CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;\n\n    /** @brief Updates a FaceRecognizer with given data and associated labels.\n\n    @param src The training images, that means the faces you want to learn. 
The data has to be given\n    as a vector\\<Mat\\>.\n    @param labels The labels corresponding to the images have to be given either as a vector\\<int\\> or\n    a Mat of type CV_32SC1.\n\n    This method updates a (probably trained) FaceRecognizer, but only if the algorithm supports it. The\n    Local Binary Patterns Histograms (LBPH) recognizer (see createLBPHFaceRecognizer) can be updated.\n    For the Eigenfaces and Fisherfaces methods, this is algorithmically not possible and you have to\n    re-estimate the model with FaceRecognizer::train. In any case, a call to train empties the existing\n    model and learns a new model, while update does not delete any model data.\n\n    @code\n    // Create a new LBPH model (it can be updated) and use the default parameters,\n    // this is the most common usage of this specific FaceRecognizer:\n    //\n    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();\n    // This is the common interface to train all of the available cv::FaceRecognizer\n    // implementations:\n    //\n    model->train(images, labels);\n    // Some containers to hold new images:\n    vector<Mat> newImages;\n    vector<int> newLabels;\n    // You should add some images to the containers:\n    //\n    // ...\n    //\n    // Now updating the model is as easy as calling:\n    model->update(newImages,newLabels);\n    // This will preserve the old model data and extend the existing model\n    // with the new features extracted from newImages!\n    @endcode\n\n    Calling update on an Eigenfaces model (see createEigenFaceRecognizer), which doesn't support\n    updating, will throw an error similar to:\n\n    @code\n    OpenCV Error: The function/feature is not implemented (This FaceRecognizer (FaceRecognizer.Eigenfaces) does not support updating, you have to use FaceRecognizer::train to update it.) in update, file /home/philipp/git/opencv/modules/contrib/src/facerec.cpp, line 305\n    terminate called after throwing an instance of 'cv::Exception'\n    @endcode\n\n    @note The FaceRecognizer does not store your training images, because this would be very\n    memory intensive and it's not the responsibility of the FaceRecognizer to do so. The caller is\n    responsible for maintaining the dataset they want to work with.\n     */\n    CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels);\n\n    /** @overload */\n    CV_WRAP int predict(InputArray src) const;\n\n\n    /** @brief Predicts a label and associated confidence (e.g. distance) for a given input image.\n\n    @param src Sample image to get a prediction from.\n    @param label The predicted label for the given image.\n    @param confidence Associated confidence (e.g. distance) for the predicted label.\n\n    The suffix const means that prediction does not affect the internal model state, so the method can\n    be safely called from within different threads.\n\n    The following example shows how to get a prediction from a trained model:\n\n    @code\n    using namespace cv;\n    // Do your initialization here (create the cv::FaceRecognizer model) ...\n    // ...\n    // Read in a sample image:\n    Mat img = imread(\"person1/3.jpg\", CV_LOAD_IMAGE_GRAYSCALE);\n    // And get a prediction from the cv::FaceRecognizer:\n    int predicted = model->predict(img);\n    @endcode\n\n    Or to get a prediction and the associated confidence (e.g. 
distance):\n\n    @code\n    using namespace cv;\n    // Do your initialization here (create the cv::FaceRecognizer model) ...\n    // ...\n    Mat img = imread(\"person1/3.jpg\", CV_LOAD_IMAGE_GRAYSCALE);\n    // Some variables for the predicted label and associated confidence (e.g. distance):\n    int predicted_label = -1;\n    double predicted_confidence = 0.0;\n    // Get the prediction and associated confidence from the model\n    model->predict(img, predicted_label, predicted_confidence);\n    @endcode\n     */\n    CV_WRAP void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const;\n\n\n    /** @brief If implemented, sends all prediction results to the collector, which can be used for custom result handling.\n    @param src Sample image to get a prediction from.\n    @param collector User-defined collector object that accepts all results.\n    @param state Optional user-defined state token that is passed back from the FaceRecognizer implementation.\n\n    To implement this method you just have to run the same internal loop as in predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence), but\n    instead of keeping only the \"best\" result, resend every result to the caller side via the given collector.\n    */\n    CV_WRAP virtual void predict(InputArray src, Ptr<PredictCollector> collector, const int state = 0) const = 0;\n\n    /** @brief Saves a FaceRecognizer and its model state.\n\n    Saves this model to a given filename, either as XML or YAML.\n    @param filename The filename to store this FaceRecognizer to (either XML/YAML).\n\n    Every FaceRecognizer overwrites FaceRecognizer::save(FileStorage& fs) to save the internal model\n    state. FaceRecognizer::save(const String& filename) saves the state of a model to the given\n    filename.\n\n    The suffix const means that prediction does not affect the internal model state, so the method can\n    be safely called from within different threads.\n     */\n    CV_WRAP virtual void save(const String& filename) const;\n\n    /** @brief Loads a FaceRecognizer and its model state.\n\n    Loads a persisted model and state from a given XML or YAML file. 
Every FaceRecognizer has to\n    overwrite FaceRecognizer::load(FileStorage& fs) to enable loading the model state.\n    FaceRecognizer::load(FileStorage& fs) in turn gets called by\n    FaceRecognizer::load(const String& filename), to ease loading a model.\n     */\n    CV_WRAP virtual void load(const String& filename);\n\n    /** @overload\n    Saves this model to a given FileStorage.\n    @param fs The FileStorage to store this FaceRecognizer to.\n    */\n    virtual void save(FileStorage& fs) const = 0;\n\n    /** @overload */\n    virtual void load(const FileStorage& fs) = 0;\n\n    /** @brief Sets string info for the specified model's label.\n\n    The string info is replaced by the provided value if it was set before for the specified label.\n     */\n    CV_WRAP virtual void setLabelInfo(int label, const String& strInfo);\n\n    /** @brief Gets string information by label.\n\n    If an unknown label id is provided or there is no label information associated with the specified\n    label id, the method returns an empty string.\n     */\n    CV_WRAP virtual String getLabelInfo(int label) const;\n\n    /** @brief Gets vector of labels by string.\n\n    The function searches for the labels containing the specified sub-string in the associated string\n    info.\n     */\n    CV_WRAP virtual std::vector<int> getLabelsByString(const String& str) const;\n    /** @brief Threshold parameter accessor, required for the default BestMinDist collector */\n    virtual double getThreshold() const = 0;\nprotected:\n    // Stored pairs \"label id - string info\"\n    std::map<int, String> _labelsInfo;\n};\n\n//! @}\n\n}}\n\n#include \"opencv2/face/facerec.hpp\"\n\n#endif\n"
  },
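  {
    "path": "samples/face/facerec_workflow_example.cpp",
    "content": "// Hypothetical sketch of the FaceRecognizer workflow declared in\n// opencv2/face.hpp: train, attach label info, save/load, predict.\n// The file name \"faces.yml\" and the synthetic images are assumptions.\n#include <vector>\n#include <opencv2/core.hpp>\n#include \"opencv2/face.hpp\"\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::face;\n\n    std::vector<Mat> images;\n    std::vector<int> labels;\n    images.push_back(Mat::zeros(64, 64, CV_8UC1));       labels.push_back(0);\n    images.push_back(Mat(64, 64, CV_8UC1, Scalar(200))); labels.push_back(1);\n\n    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();\n    model->train(images, labels);\n\n    // String label info keeps a person's name next to the numeric label.\n    model->setLabelInfo(0, \"alice\");\n    model->setLabelInfo(1, \"bob\");\n\n    model->save(\"faces.yml\"); // persist the model state as YAML\n    model->load(\"faces.yml\"); // and read it back\n\n    int label = -1;\n    double confidence = 0.0;\n    model->predict(images[1], label, confidence);\n    String who = model->getLabelInfo(label); // \"bob\" if the prediction matches\n    (void)who; (void)confidence;\n    return 0;\n}\n"
  },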
  {
    "path": "src/3rdparty/opencv/include/opencv2/features2d/features2d.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/features2d.hpp\"\n"
  },
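  {
    "path": "samples/features2d/feature2d_interface_example.cpp",
    "content": "// Hypothetical sketch for the Feature2D interface declared in\n// opencv2/features2d.hpp: detect ORB keypoints, compute descriptors and match\n// them with the norm reported by defaultNorm(). The random image is a stand-in\n// for real input.\n#include <vector>\n#include <opencv2/core.hpp>\n#include <opencv2/features2d.hpp>\n\nint main()\n{\n    using namespace cv;\n\n    Mat image(480, 640, CV_8UC1);\n    randu(image, Scalar(0), Scalar(255)); // synthetic texture so ORB finds corners\n\n    Ptr<ORB> orb = ORB::create(500); // retain at most 500 features\n    std::vector<KeyPoint> keypoints;\n    Mat descriptors;\n    orb->detectAndCompute(image, noArray(), keypoints, descriptors);\n\n    // ORB descriptors are binary, so defaultNorm() is a Hamming norm.\n    BFMatcher matcher(orb->defaultNorm());\n    std::vector<DMatch> matches;\n    matcher.match(descriptors, descriptors, matches); // self-match as a placeholder\n    return 0;\n}\n"
  },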
  {
    "path": "src/3rdparty/opencv/include/opencv2/features2d.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_FEATURES_2D_HPP__\n#define __OPENCV_FEATURES_2D_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/flann/miniflann.hpp\"\n\n/**\n  @defgroup features2d 2D Features Framework\n  @{\n    @defgroup features2d_main Feature Detection and Description\n    @defgroup features2d_match Descriptor Matchers\n\nMatchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to\neasily switch between different algorithms solving the same problem. This section is devoted to\nmatching descriptors that are represented as vectors in a multidimensional space. 
All objects that\nimplement vector descriptor matchers inherit the DescriptorMatcher interface.\n\n@note\n    -   An example explaining keypoint matching can be found at\n        opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp\n    -   An example on descriptor matching evaluation can be found at\n        opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp\n    -   An example on one to many image matching can be found at\n        opencv_source_code/samples/cpp/matching_to_many_images.cpp\n\n    @defgroup features2d_draw Drawing Function of Keypoints and Matches\n    @defgroup features2d_category Object Categorization\n\nThis section describes approaches based on local 2D features and used to categorize objects.\n\n@note\n    -   A complete Bag-Of-Words sample can be found at\n        opencv_source_code/samples/cpp/bagofwords_classification.cpp\n    -   (Python) An example using the features2D framework to perform object categorization can be\n        found at opencv_source_code/samples/python/find_obj.py\n\n  @}\n */\n\nnamespace cv\n{\n\n//! @addtogroup features2d\n//! @{\n\n// //! writes vector of keypoints to the file storage\n// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);\n// //! reads vector of keypoints from the specified file storage node\n// CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);\n\n/** @brief A class that filters a vector of keypoints.\n\n Because it is currently difficult to provide a convenient interface for all usage scenarios of\n keypoint filtering, the class has only the static methods needed so far.\n */\nclass CV_EXPORTS KeyPointsFilter\n{\npublic:\n    KeyPointsFilter(){}\n\n    /*\n     * Remove keypoints within borderPixels of an image edge.\n     */\n    static void runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize );\n    /*\n     * Remove keypoints of sizes out of range.\n     */\n    static void runByKeypointSize( std::vector<KeyPoint>& keypoints, float minSize,\n                                   float maxSize=FLT_MAX );\n    /*\n     * Remove keypoints masked out by the given pixel mask of the image.\n     */\n    static void runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask );\n    /*\n     * Remove duplicated keypoints.\n     */\n    static void removeDuplicated( std::vector<KeyPoint>& keypoints );\n\n    /*\n     * Retain the specified number of the best keypoints (according to the response)\n     */\n    static void retainBest( std::vector<KeyPoint>& keypoints, int npoints );\n};\n\n\n/************************************ Base Classes ************************************/\n\n/** @brief Abstract base class for 2D image feature detectors and descriptor extractors\n*/\nclass CV_EXPORTS_W Feature2D : public virtual Algorithm\n{\npublic:\n    virtual ~Feature2D();\n\n    /** @brief Detects keypoints in an image (first variant) or image set (second variant).\n\n    @param image Image.\n    @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set\n    of keypoints detected in images[i].\n    @param mask Mask specifying where to look for keypoints (optional). 
It must be an 8-bit integer\n    matrix with non-zero values in the region of interest.\n     */\n    CV_WRAP virtual void detect( InputArray image,\n                                 CV_OUT std::vector<KeyPoint>& keypoints,\n                                 InputArray mask=noArray() );\n\n    /** @overload\n    @param images Image set.\n    @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set\n    of keypoints detected in images[i].\n    @param masks Masks for each input image specifying where to look for keypoints (optional).\n    masks[i] is a mask for images[i].\n    */\n    virtual void detect( InputArrayOfArrays images,\n                         std::vector<std::vector<KeyPoint> >& keypoints,\n                         InputArrayOfArrays masks=noArray() );\n\n    /** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set\n    (second variant).\n\n    @param image Image.\n    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be\n    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint\n    with several dominant orientations (one for each orientation).\n    @param descriptors Computed descriptors. In the second variant of the method, descriptors[i] are\n    the descriptors computed for keypoints[i]. Row j in descriptors (or descriptors[i]) is the\n    descriptor for the j-th keypoint.\n     */\n    CV_WRAP virtual void compute( InputArray image,\n                                  CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,\n                                  OutputArray descriptors );\n\n    /** @overload\n\n    @param images Image set.\n    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be\n    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint\n    with several dominant orientations (one for each orientation).\n    @param descriptors Computed descriptors. In the second variant of the method, descriptors[i] are\n    the descriptors computed for keypoints[i]. Row j in descriptors (or descriptors[i]) is the\n    descriptor for the j-th keypoint.\n    */\n    virtual void compute( InputArrayOfArrays images,\n                          std::vector<std::vector<KeyPoint> >& keypoints,\n                          OutputArrayOfArrays descriptors );\n\n    /** Detects keypoints and computes the descriptors */\n    CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,\n                                           CV_OUT std::vector<KeyPoint>& keypoints,\n                                           OutputArray descriptors,\n                                           bool useProvidedKeypoints=false );\n\n    CV_WRAP virtual int descriptorSize() const;\n    CV_WRAP virtual int descriptorType() const;\n    CV_WRAP virtual int defaultNorm() const;\n\n    //! Return true if detector object is empty\n    CV_WRAP virtual bool empty() const;\n};\n\n/** Feature detectors in OpenCV have wrappers with a common interface that enables you to easily switch\nbetween different algorithms solving the same problem. All objects that implement keypoint detectors\ninherit the FeatureDetector interface. */\ntypedef Feature2D FeatureDetector;\n\n/** Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you\nto easily switch between different algorithms solving the same problem. 
This section is devoted to\ncomputing descriptors represented as vectors in a multidimensional space. All objects that implement\nthe vector descriptor extractors inherit the DescriptorExtractor interface.\n */\ntypedef Feature2D DescriptorExtractor;\n\n//! @addtogroup features2d_main\n//! @{\n\n/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .\n */\nclass CV_EXPORTS_W BRISK : public Feature2D\n{\npublic:\n    /** @brief The BRISK constructor\n\n    @param thresh AGAST detection threshold score.\n    @param octaves detection octaves. Use 0 to do single scale.\n    @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a\n    keypoint.\n     */\n    CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);\n\n    /** @brief The BRISK constructor for a custom pattern\n\n    @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for\n    keypoint scale 1).\n    @param numberList defines the number of sampling points on the sampling circle. Must be the same\n    size as radiusList.\n    @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint\n    scale 1).\n    @param dMin threshold for the long pairings used for orientation determination (in pixels for\n    keypoint scale 1).\n    @param indexChange index remapping of the bits. */\n    CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,\n        float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());\n};\n\n/** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor\n\ndescribed in @cite RRKB11 . The algorithm uses FAST in pyramids to detect stable keypoints, selects\nthe strongest features using FAST or Harris response, finds their orientation using first-order\nmoments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or\nk-tuples) are rotated according to the measured orientation).\n */\nclass CV_EXPORTS_W ORB : public Feature2D\n{\npublic:\n    enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };\n\n    /** @brief The ORB constructor\n\n    @param nfeatures The maximum number of features to retain.\n    @param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical\n    pyramid, where each level has 4x fewer pixels than the previous, but such a big scale factor\n    will degrade feature matching scores dramatically. On the other hand, a scale factor too close to 1\n    means that covering a certain scale range requires more pyramid levels, so the speed\n    will suffer.\n    @param nlevels The number of pyramid levels. The smallest level will have linear size equal to\n    input_image_linear_size/pow(scaleFactor, nlevels).\n    @param edgeThreshold This is the size of the border where the features are not detected. It should\n    roughly match the patchSize parameter.\n    @param firstLevel It should be 0 in the current implementation.\n    @param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The\n    default value 2 means the BRIEF where we take a random point pair and compare their brightnesses,\n    so we get 0/1 response. Other possible values are 3 and 4. 
For example, 3 means that we take 3\n    random points (of course, those point coordinates are random, but they are generated from the\n    pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel\n    rectangle), find the point of maximum brightness and output the index of the winner (0, 1 or 2). Such\n    output will occupy 2 bits, and therefore it will need a special variant of Hamming distance,\n    denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each\n    bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).\n    @param scoreType The default HARRIS_SCORE means that Harris algorithm is used to rank features\n    (the score is written to KeyPoint::score and is used to retain the best nfeatures features);\n    FAST_SCORE is an alternative value of the parameter that produces slightly less stable keypoints,\n    but it is a little faster to compute.\n    @param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller\n    pyramid layers the perceived image area covered by a feature will be larger.\n    @param fastThreshold the threshold passed to the FAST detector.\n     */\n    CV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,\n        int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);\n\n    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;\n    CV_WRAP virtual int getMaxFeatures() const = 0;\n\n    CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;\n    CV_WRAP virtual double getScaleFactor() const = 0;\n\n    CV_WRAP virtual void setNLevels(int nlevels) = 0;\n    CV_WRAP virtual int getNLevels() const = 0;\n\n    CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0;\n    CV_WRAP virtual int getEdgeThreshold() const = 0;\n\n    CV_WRAP virtual void setFirstLevel(int firstLevel) = 0;\n    CV_WRAP virtual int getFirstLevel() const = 0;\n\n    CV_WRAP virtual void setWTA_K(int wta_k) = 0;\n    CV_WRAP virtual int getWTA_K() const = 0;\n\n    CV_WRAP virtual void setScoreType(int scoreType) = 0;\n    CV_WRAP virtual int getScoreType() const = 0;\n\n    CV_WRAP virtual void setPatchSize(int patchSize) = 0;\n    CV_WRAP virtual int getPatchSize() const = 0;\n\n    CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0;\n    CV_WRAP virtual int getFastThreshold() const = 0;\n};\n\n/** @brief Maximally stable extremal region extractor\n\nThe class encapsulates all the parameters of the %MSER extraction algorithm (see [wiki\narticle](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions)).\n\n- there are two different implementations of %MSER: one for grey images, one for color images\n\n- the grey image algorithm is taken from: @cite nister2008linear ; the paper claims to be faster\nthan the union-find method; it actually gets 1.5~2m/s on my Centrino L7200 1.2GHz laptop.\n\n- the color image algorithm is taken from: @cite forssen2007maximally ; it should be much slower\nthan the grey image method (3~4 times); the chi_table.h file is taken directly from the paper's source\ncode which is distributed under GPL.\n\n- (Python) A complete example showing the use of the %MSER detector can be found at samples/python/mser.py\n*/\nclass CV_EXPORTS_W MSER : public Feature2D\n{\npublic:\n    /** @brief Full constructor for the %MSER detector\n\n    @param _delta it compares \\f$(size_{i}-size_{i-delta})/size_{i-delta}\\f$\n    @param _min_area prune areas smaller than minArea\n    
@param _max_area prune areas bigger than maxArea\n    @param _max_variation prune regions that have similar size to their children\n    @param _min_diversity for color images, trace back to cut off MSERs with diversity less than min_diversity\n    @param _max_evolution for color images, the evolution steps\n    @param _area_threshold for color images, the area threshold to cause re-initialize\n    @param _min_margin for color images, ignore margins that are too small\n    @param _edge_blur_size for color images, the aperture size for edge blur\n     */\n    CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,\n          double _max_variation=0.25, double _min_diversity=.2,\n          int _max_evolution=200, double _area_threshold=1.01,\n          double _min_margin=0.003, int _edge_blur_size=5 );\n\n    /** @brief Detect %MSER regions\n\n    @param image input image (8UC1, 8UC3 or 8UC4)\n    @param msers resulting list of point sets\n    @param bboxes resulting bounding boxes\n    */\n    CV_WRAP virtual void detectRegions( InputArray image,\n                                        CV_OUT std::vector<std::vector<Point> >& msers,\n                                        std::vector<Rect>& bboxes ) = 0;\n\n    CV_WRAP virtual void setDelta(int delta) = 0;\n    CV_WRAP virtual int getDelta() const = 0;\n\n    CV_WRAP virtual void setMinArea(int minArea) = 0;\n    CV_WRAP virtual int getMinArea() const = 0;\n\n    CV_WRAP virtual void setMaxArea(int maxArea) = 0;\n    CV_WRAP virtual int getMaxArea() const = 0;\n\n    CV_WRAP virtual void setPass2Only(bool f) = 0;\n    CV_WRAP virtual bool getPass2Only() const = 0;\n};\n\n/** @overload */\nCV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,\n                      int threshold, bool nonmaxSuppression=true );\n\n/** @brief Detects corners using the FAST algorithm\n\n@param image grayscale image where keypoints (corners) are detected.\n@param keypoints keypoints detected on the image.\n@param threshold threshold on difference between intensity of the central pixel and pixels of a\ncircle around this pixel.\n@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners\n(keypoints).\n@param type one of the three neighborhoods as defined in the paper:\nFastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,\nFastFeatureDetector::TYPE_5_8\n\nDetects corners using the FAST algorithm by @cite Rosten06 .\n\n@note In the Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,\ncv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner\ndetection, use the cv2.FAST.detect() method.\n */\nCV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,\n                      int threshold, bool nonmaxSuppression, int type );\n\n//! @} features2d_main\n\n//! @addtogroup features2d_main\n//! @{\n\n/** @brief Wrapping class for feature detection using the FAST method.
\n */\nclass CV_EXPORTS_W FastFeatureDetector : public Feature2D\n{\npublic:\n    enum\n    {\n        TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2,\n        THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002,\n    };\n\n    CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,\n                                                    bool nonmaxSuppression=true,\n                                                    int type=FastFeatureDetector::TYPE_9_16 );\n\n    CV_WRAP virtual void setThreshold(int threshold) = 0;\n    CV_WRAP virtual int getThreshold() const = 0;\n\n    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;\n    CV_WRAP virtual bool getNonmaxSuppression() const = 0;\n\n    CV_WRAP virtual void setType(int type) = 0;\n    CV_WRAP virtual int getType() const = 0;\n};\n\n/** @overload */\nCV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,\n                      int threshold, bool nonmaxSuppression=true );\n\n/** @brief Detects corners using the AGAST algorithm\n\n@param image grayscale image where keypoints (corners) are detected.\n@param keypoints keypoints detected on the image.\n@param threshold threshold on difference between intensity of the central pixel and pixels of a\ncircle around this pixel.\n@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners\n(keypoints).\n@param type one of the four neighborhoods as defined in the paper:\nAgastFeatureDetector::AGAST_5_8, AgastFeatureDetector::AGAST_7_12d,\nAgastFeatureDetector::AGAST_7_12s, AgastFeatureDetector::OAST_9_16\n\nFor non-Intel platforms, there is a tree optimised variant of AGAST with the same numerical results.\nThe 32-bit binary tree tables were generated automatically from the original code using a Perl script.\nThe Perl script and examples of tree generation are placed in the features2d/doc folder.\nDetects corners using the AGAST algorithm by @cite mair2010_agast .\n\n */\nCV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,\n                      int threshold, bool nonmaxSuppression, int type );\n//! @} features2d_main\n\n//! @addtogroup features2d_main\n//! @{\n\n/** @brief Wrapping class for feature detection using the AGAST method.\n */\nclass CV_EXPORTS_W AgastFeatureDetector : public Feature2D\n{\npublic:\n    enum\n    {\n        AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,\n        THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,\n    };\n\n    CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,\n                                                     bool nonmaxSuppression=true,\n                                                     int type=AgastFeatureDetector::OAST_9_16 );\n\n    CV_WRAP virtual void setThreshold(int threshold) = 0;\n    CV_WRAP virtual int getThreshold() const = 0;\n\n    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;\n    CV_WRAP virtual bool getNonmaxSuppression() const = 0;\n\n    CV_WRAP virtual void setType(int type) = 0;\n    CV_WRAP virtual int getType() const = 0;\n};\n\n/** @brief Wrapping class for feature detection using the goodFeaturesToTrack function.
\n */\nclass CV_EXPORTS_W GFTTDetector : public Feature2D\n{\npublic:\n    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,\n                                             int blockSize=3, bool useHarrisDetector=false, double k=0.04 );\n    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;\n    CV_WRAP virtual int getMaxFeatures() const = 0;\n\n    CV_WRAP virtual void setQualityLevel(double qlevel) = 0;\n    CV_WRAP virtual double getQualityLevel() const = 0;\n\n    CV_WRAP virtual void setMinDistance(double minDistance) = 0;\n    CV_WRAP virtual double getMinDistance() const = 0;\n\n    CV_WRAP virtual void setBlockSize(int blockSize) = 0;\n    CV_WRAP virtual int getBlockSize() const = 0;\n\n    CV_WRAP virtual void setHarrisDetector(bool val) = 0;\n    CV_WRAP virtual bool getHarrisDetector() const = 0;\n\n    CV_WRAP virtual void setK(double k) = 0;\n    CV_WRAP virtual double getK() const = 0;\n};\n\n/** @brief Class for extracting blobs from an image.\n\nThe class implements a simple algorithm for extracting blobs from an image:\n\n1.  Convert the source image to binary images by applying thresholding with several thresholds from\n    minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between\n    neighboring thresholds.\n2.  Extract connected components from every binary image by findContours and calculate their\n    centers.\n3.  Group centers from several binary images by their coordinates. Close centers form one group that\n    corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter.\n4.  From the groups, estimate final centers of blobs and their radii and return them as the locations\n    and sizes of keypoints.\n\nThis class performs several filtrations of returned blobs. You should set filterBy\\* to true/false\nto turn the corresponding filtration on or off. Available filtrations:\n\n-   **By color**. This filter compares the intensity of a binary image at the center of a blob to\nblobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs\nand blobColor = 255 to extract light blobs.\n-   **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive).\n-   **By circularity**. Extracted blobs have circularity\n(\\f$\\frac{4*\\pi*Area}{perimeter * perimeter}\\f$) between minCircularity (inclusive) and\nmaxCircularity (exclusive).\n-   **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio\nbetween minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).\n-   **By convexity**. 
Extracted blobs have convexity (area / area of blob convex hull) between\nminConvexity (inclusive) and maxConvexity (exclusive).\n\nDefault values of parameters are tuned to extract dark circular blobs.\n */\nclass CV_EXPORTS_W SimpleBlobDetector : public Feature2D\n{\npublic:\n  struct CV_EXPORTS_W_SIMPLE Params\n  {\n      CV_WRAP Params();\n      CV_PROP_RW float thresholdStep;\n      CV_PROP_RW float minThreshold;\n      CV_PROP_RW float maxThreshold;\n      CV_PROP_RW size_t minRepeatability;\n      CV_PROP_RW float minDistBetweenBlobs;\n\n      CV_PROP_RW bool filterByColor;\n      CV_PROP_RW uchar blobColor;\n\n      CV_PROP_RW bool filterByArea;\n      CV_PROP_RW float minArea, maxArea;\n\n      CV_PROP_RW bool filterByCircularity;\n      CV_PROP_RW float minCircularity, maxCircularity;\n\n      CV_PROP_RW bool filterByInertia;\n      CV_PROP_RW float minInertiaRatio, maxInertiaRatio;\n\n      CV_PROP_RW bool filterByConvexity;\n      CV_PROP_RW float minConvexity, maxConvexity;\n\n      void read( const FileNode& fn );\n      void write( FileStorage& fs ) const;\n  };\n\n  CV_WRAP static Ptr<SimpleBlobDetector>\n    create(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());\n};\n\n//! @} features2d_main\n\n//! @addtogroup features2d_main\n//! @{\n\n/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .\n\n@note AKAZE descriptors can only be used with KAZE or AKAZE keypoints.\n\n[ABD12] KAZE Features. Pablo\nF. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision\n(ECCV), Florence, Italy, October 2012.\n*/\nclass CV_EXPORTS_W KAZE : public Feature2D\n{\npublic:\n    enum\n    {\n        DIFF_PM_G1 = 0,\n        DIFF_PM_G2 = 1,\n        DIFF_WEICKERT = 2,\n        DIFF_CHARBONNIER = 3\n    };\n\n    /** @brief The KAZE constructor\n\n    @param extended Set to enable extraction of extended (128-byte) descriptor.\n    @param upright Set to enable use of upright descriptors (non rotation-invariant).\n    @param threshold Detector response threshold to accept point\n    @param nOctaves Maximum octave evolution of the image\n    @param nOctaveLayers Default number of sublevels per scale level\n    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or\n    DIFF_CHARBONNIER\n     */\n    CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,\n                                    float threshold = 0.001f,\n                                    int nOctaves = 4, int nOctaveLayers = 4,\n                                    int diffusivity = KAZE::DIFF_PM_G2);\n\n    CV_WRAP virtual void setExtended(bool extended) = 0;\n    CV_WRAP virtual bool getExtended() const = 0;\n\n    CV_WRAP virtual void setUpright(bool upright) = 0;\n    CV_WRAP virtual bool getUpright() const = 0;\n\n    CV_WRAP virtual void setThreshold(double threshold) = 0;\n    CV_WRAP virtual double getThreshold() const = 0;\n\n    CV_WRAP virtual void setNOctaves(int octaves) = 0;\n    CV_WRAP virtual int getNOctaves() const = 0;\n\n    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;\n    CV_WRAP virtual int getNOctaveLayers() const = 0;\n\n    CV_WRAP virtual void setDiffusivity(int diff) = 0;\n    CV_WRAP virtual int getDiffusivity() const = 0;\n};\n\n/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13 . :\n\n@note AKAZE descriptors can only be used with KAZE or AKAZE keypoints. 
For performance reasons, prefer *operator()*\nto separate *detect* and *extract* calls.\n\n[ANB13] Fast Explicit Diffusion\nfor Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien\nBartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013.\n */\nclass CV_EXPORTS_W AKAZE : public Feature2D\n{\npublic:\n    // AKAZE descriptor type\n    enum\n    {\n        DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation\n        DESCRIPTOR_KAZE = 3,\n        DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation\n        DESCRIPTOR_MLDB = 5\n    };\n\n    /** @brief The AKAZE constructor\n\n    @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,\n    DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.\n    @param descriptor_size Size of the descriptor in bits. 0 -\\> Full size\n    @param descriptor_channels Number of channels in the descriptor (1, 2, 3)\n    @param threshold Detector response threshold to accept point\n    @param nOctaves Maximum octave evolution of the image\n    @param nOctaveLayers Default number of sublevels per scale level\n    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or\n    DIFF_CHARBONNIER\n     */\n    CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,\n                                     int descriptor_size = 0, int descriptor_channels = 3,\n                                     float threshold = 0.001f, int nOctaves = 4,\n                                     int nOctaveLayers = 4, int diffusivity = KAZE::DIFF_PM_G2);\n\n    CV_WRAP virtual void setDescriptorType(int dtype) = 0;\n    CV_WRAP virtual int getDescriptorType() const = 0;\n\n    CV_WRAP virtual void setDescriptorSize(int dsize) = 0;\n    CV_WRAP virtual int getDescriptorSize() const = 0;\n\n    CV_WRAP virtual void setDescriptorChannels(int dch) = 0;\n    CV_WRAP virtual int getDescriptorChannels() const = 0;\n\n    CV_WRAP virtual void setThreshold(double threshold) = 0;\n    CV_WRAP virtual double getThreshold() const = 0;\n\n    CV_WRAP virtual void setNOctaves(int octaves) = 0;\n    CV_WRAP virtual int getNOctaves() const = 0;\n\n    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;\n    CV_WRAP virtual int getNOctaveLayers() const = 0;\n\n    CV_WRAP virtual void setDiffusivity(int diff) = 0;\n    CV_WRAP virtual int getDiffusivity() const = 0;\n};\n\n//! 
@} features2d_main\n\n/****************************************************************************************\\\n*                                      Distance                                          *\n\\****************************************************************************************/\n\ntemplate<typename T>\nstruct CV_EXPORTS Accumulator\n{\n    typedef T Type;\n};\n\ntemplate<> struct Accumulator<unsigned char>  { typedef float Type; };\ntemplate<> struct Accumulator<unsigned short> { typedef float Type; };\ntemplate<> struct Accumulator<char>   { typedef float Type; };\ntemplate<> struct Accumulator<short>  { typedef float Type; };\n\n/*\n * Squared Euclidean distance functor\n */\ntemplate<class T>\nstruct CV_EXPORTS SL2\n{\n    enum { normType = NORM_L2SQR };\n    typedef T ValueType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    ResultType operator()( const T* a, const T* b, int size ) const\n    {\n        return normL2Sqr<ValueType, ResultType>(a, b, size);\n    }\n};\n\n/*\n * Euclidean distance functor\n */\ntemplate<class T>\nstruct CV_EXPORTS L2\n{\n    enum { normType = NORM_L2 };\n    typedef T ValueType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    ResultType operator()( const T* a, const T* b, int size ) const\n    {\n        return (ResultType)std::sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));\n    }\n};\n\n/*\n * Manhattan distance (city block distance) functor\n */\ntemplate<class T>\nstruct CV_EXPORTS L1\n{\n    enum { normType = NORM_L1 };\n    typedef T ValueType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    ResultType operator()( const T* a, const T* b, int size ) const\n    {\n        return normL1<ValueType, ResultType>(a, b, size);\n    }\n};\n\n/****************************************************************************************\\\n*                                  DescriptorMatcher                                     *\n\\****************************************************************************************/\n\n//! @addtogroup features2d_match\n//! @{\n\n/** @brief Abstract base class for matching keypoint descriptors.\n\nIt has two groups of match methods: for matching descriptors of an image with another image or with\nan image set.\n */\nclass CV_EXPORTS_W DescriptorMatcher : public Algorithm\n{\npublic:\n    virtual ~DescriptorMatcher();\n\n    /** @brief Adds descriptors to train a CPU (trainDescCollection) or GPU (utrainDescCollection) descriptor\n    collection.\n\n    If the collection is not empty, the new descriptors are added to existing train descriptors.\n\n    @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same\n    train image.\n     */\n    CV_WRAP virtual void add( InputArrayOfArrays descriptors );\n\n    /** @brief Returns a constant reference to the train descriptor collection trainDescCollection .\n     */\n    CV_WRAP const std::vector<Mat>& getTrainDescriptors() const;\n\n    /** @brief Clears the train descriptor collections.\n     */\n    CV_WRAP virtual void clear();\n\n    /** @brief Returns true if there are no train descriptors in both collections.\n     */\n    CV_WRAP virtual bool empty() const;\n\n    /** @brief Returns true if the descriptor matcher supports masking permissible matches.\n     */\n    CV_WRAP virtual bool isMaskSupported() const = 0;\n\n    /** @brief Trains a descriptor matcher\n\n    Trains a descriptor matcher (for example, the flann index). 
In all methods to match, the method\n    train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher)\n    have an empty implementation of this method. Other matchers really train their inner structures (for\n    example, FlannBasedMatcher trains flann::Index ).\n     */\n    CV_WRAP virtual void train();\n\n    /** @brief Finds the best match for each descriptor from a query set.\n\n    @param queryDescriptors Query set of descriptors.\n    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors\n    collection stored in the class object.\n    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this\n    descriptor. So, matches size may be smaller than the query descriptors count.\n    @param mask Mask specifying permissible matches between an input query and train matrices of\n    descriptors.\n\n    In the first variant of this method, the train descriptors are passed as an input argument. In the\n    second variant of the method, train descriptors collection that was set by DescriptorMatcher::add is\n    used. Optional mask (or masks) can be passed to specify which query and training descriptors can be\n    matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if\n    mask.at\\<uchar\\>(i,j) is non-zero.\n     */\n    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,\n                CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;\n\n    /** @brief Finds the k best matches for each descriptor from a query set.\n\n    @param queryDescriptors Query set of descriptors.\n    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors\n    collection stored in the class object.\n    @param mask Mask specifying permissible matches between an input query and train matrices of\n    descriptors.\n    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.\n    @param k Count of best matches found per each query descriptor or less if a query descriptor has\n    less than k possible matches in total.\n    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is\n    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,\n    the matches vector does not contain matches for fully masked-out query descriptors.\n\n    These extended variants of DescriptorMatcher::match methods find several best matches for each query\n    descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match\n    for the details about query and train descriptors.\n     */\n    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,\n                   CV_OUT std::vector<std::vector<DMatch> >& matches, int k,\n                   InputArray mask=noArray(), bool compactResult=false ) const;\n\n    /** @brief For each query descriptor, finds the training descriptors not farther than the specified distance.\n\n    @param queryDescriptors Query set of descriptors.\n    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors\n    collection stored in the class object.\n    @param matches Found matches.\n    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is\n    false, the matches vector has the same size as queryDescriptors rows. 
If compactResult is true,\n    the matches vector does not contain matches for fully masked-out query descriptors.\n    @param maxDistance Threshold for the distance between matched descriptors. Distance means here\n    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured\n    in Pixels)!\n    @param mask Mask specifying permissible matches between an input query and train matrices of\n    descriptors.\n\n    For each query descriptor, the methods find such training descriptors that the distance between the\n    query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are\n    returned in the distance increasing order.\n     */\n    void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,\n                      std::vector<std::vector<DMatch> >& matches, float maxDistance,\n                      InputArray mask=noArray(), bool compactResult=false ) const;\n\n    /** @overload\n    @param queryDescriptors Query set of descriptors.\n    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this\n    descriptor. So, matches size may be smaller than the query descriptors count.\n    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query\n    descriptors and stored train descriptors from the i-th image trainDescCollection[i].\n    */\n    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,\n                        InputArrayOfArrays masks=noArray() );\n    /** @overload\n    @param queryDescriptors Query set of descriptors.\n    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.\n    @param k Count of best matches found per each query descriptor or less if a query descriptor has\n    less than k possible matches in total.\n    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query\n    descriptors and stored train descriptors from the i-th image trainDescCollection[i].\n    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is\n    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,\n    the matches vector does not contain matches for fully masked-out query descriptors.\n    */\n    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,\n                           InputArrayOfArrays masks=noArray(), bool compactResult=false );\n    /** @overload\n    @param queryDescriptors Query set of descriptors.\n    @param matches Found matches.\n    @param maxDistance Threshold for the distance between matched descriptors. Distance means here\n    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured\n    in Pixels)!\n    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query\n    descriptors and stored train descriptors from the i-th image trainDescCollection[i].\n    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is\n    false, the matches vector has the same size as queryDescriptors rows. 
If compactResult is true,\n    the matches vector does not contain matches for fully masked-out query descriptors.\n    */\n    void radiusMatch( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,\n                      InputArrayOfArrays masks=noArray(), bool compactResult=false );\n\n    // Reads matcher object from a file node\n    virtual void read( const FileNode& );\n    // Writes matcher object to a file storage\n    virtual void write( FileStorage& ) const;\n\n    /** @brief Clones the matcher.\n\n    @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,\n    that is, copies both parameters and train data. If emptyTrainData is true, the method creates an\n    object copy with the current parameters but with empty train data.\n     */\n    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;\n\n    /** @brief Creates a descriptor matcher of a given type with the default parameters (using default\n    constructor).\n\n    @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are\n    supported:\n    -   `BruteForce` (it uses L2 )\n    -   `BruteForce-L1`\n    -   `BruteForce-Hamming`\n    -   `BruteForce-Hamming(2)`\n    -   `FlannBased`\n     */\n    CV_WRAP static Ptr<DescriptorMatcher> create( const String& descriptorMatcherType );\nprotected:\n    /**\n     * Class to work with descriptors from several images as with one merged matrix.\n     * It is used e.g. in FlannBasedMatcher.\n     */\n    class CV_EXPORTS DescriptorCollection\n    {\n    public:\n        DescriptorCollection();\n        DescriptorCollection( const DescriptorCollection& collection );\n        virtual ~DescriptorCollection();\n\n        // Vector of matrices \"descriptors\" will be merged to one matrix \"mergedDescriptors\" here.\n        void set( const std::vector<Mat>& descriptors );\n        virtual void clear();\n\n        const Mat& getDescriptors() const;\n        const Mat getDescriptor( int imgIdx, int localDescIdx ) const;\n        const Mat getDescriptor( int globalDescIdx ) const;\n        void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const;\n\n        int size() const;\n\n    protected:\n        Mat mergedDescriptors;\n        std::vector<int> startIdxs;\n    };\n\n    //! In fact the matching is implemented only by the following two methods. These methods suppose\n    //! that the class object has been trained already. Public match methods call these methods\n    //! after calling train().\n    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,\n        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;\n    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,\n        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;\n\n    static bool isPossibleMatch( InputArray mask, int queryIdx, int trainIdx );\n    static bool isMaskedOut( InputArrayOfArrays masks, int queryIdx );\n\n    static Mat clone_op( Mat m ) { return m.clone(); }\n    void checkMasks( InputArrayOfArrays masks, int queryDescriptorsCount ) const;\n\n    //! 
Collection of descriptors from train images.\n    std::vector<Mat> trainDescCollection;\n    std::vector<UMat> utrainDescCollection;\n};\n\n/** @brief Brute-force descriptor matcher.\n\nFor each descriptor in the first set, this matcher finds the closest descriptor in the second set\nby trying each one. This descriptor matcher supports masking permissible matches of descriptor\nsets.\n */\nclass CV_EXPORTS_W BFMatcher : public DescriptorMatcher\n{\npublic:\n    /** @brief Brute-force matcher constructor.\n\n    @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are\n    preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and\n    BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor\n    description).\n    @param crossCheck If it is false, this will be the default BFMatcher behaviour: it finds the k\n    nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with\n    k=1 will only return pairs (i,j) such that for i-th query descriptor the j-th descriptor in the\n    matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent\n    pairs. This technique usually produces the best results with a minimal number of outliers when there are\n    enough matches. It is an alternative to the ratio test used by D. Lowe in the SIFT paper.\n     */\n    CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );\n    virtual ~BFMatcher() {}\n\n    virtual bool isMaskSupported() const { return true; }\n\n    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;\nprotected:\n    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,\n        InputArrayOfArrays masks=noArray(), bool compactResult=false );\n    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,\n        InputArrayOfArrays masks=noArray(), bool compactResult=false );\n\n    int normType;\n    bool crossCheck;\n};\n\n\n/** @brief Flann-based descriptor matcher.\n\nThis matcher trains flann::Index_ on a train descriptor collection and calls its nearest search\nmethods to find the best matches. So, this matcher may be faster when matching a large train\ncollection than the brute force matcher. FlannBasedMatcher does not support masking permissible\nmatches of descriptor sets because flann::Index does not support this. 
:\n */\nclass CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher\n{\npublic:\n    CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=makePtr<flann::KDTreeIndexParams>(),\n                       const Ptr<flann::SearchParams>& searchParams=makePtr<flann::SearchParams>() );\n\n    virtual void add( InputArrayOfArrays descriptors );\n    virtual void clear();\n\n    // Reads matcher object from a file node\n    virtual void read( const FileNode& );\n    // Writes matcher object to a file storage\n    virtual void write( FileStorage& ) const;\n\n    virtual void train();\n    virtual bool isMaskSupported() const;\n\n    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;\nprotected:\n    static void convertToDMatches( const DescriptorCollection& descriptors,\n                                   const Mat& indices, const Mat& distances,\n                                   std::vector<std::vector<DMatch> >& matches );\n\n    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,\n        InputArrayOfArrays masks=noArray(), bool compactResult=false );\n    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,\n        InputArrayOfArrays masks=noArray(), bool compactResult=false );\n\n    Ptr<flann::IndexParams> indexParams;\n    Ptr<flann::SearchParams> searchParams;\n    Ptr<flann::Index> flannIndex;\n\n    DescriptorCollection mergedDescriptors;\n    int addedDescCount;\n};\n\n//! @} features2d_match\n\n/****************************************************************************************\\\n*                                   Drawing functions                                    *\n\\****************************************************************************************/\n\n//! @addtogroup features2d_draw\n//! @{\n\nstruct CV_EXPORTS DrawMatchesFlags\n{\n    enum{ DEFAULT = 0, //!< Output image matrix will be created (Mat::create),\n                       //!< i.e. existing memory of output image may be reused.\n                       //!< Two source images, matches and single keypoints will be drawn.\n                       //!< For each keypoint only the center point will be drawn (without\n                       //!< the circle around keypoint with keypoint size and orientation).\n          DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).\n                                //!< Matches will be drawn on existing content of output image.\n          NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.\n          DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and\n                                  //!< orientation will be drawn.\n        };\n};\n\n/** @brief Draws keypoints.\n\n@param image Source image.\n@param keypoints Keypoints from the source image.\n@param outImage Output image. Its content depends on the flags value defining what is drawn in the\noutput image. See possible flags bit values below.\n@param color Color of keypoints.\n@param flags Flags setting drawing features. Possible flags bit values are defined by\nDrawMatchesFlags. 
See details above in drawMatches .\n\n@note\nFor the Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,\ncv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,\ncv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS\n */\nCV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,\n                               const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );\n\n/** @brief Draws the found matches of keypoints from two images.\n\n@param img1 First source image.\n@param keypoints1 Keypoints from the first source image.\n@param img2 Second source image.\n@param keypoints2 Keypoints from the second source image.\n@param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]\nhas a corresponding point in keypoints2[matches[i]] .\n@param outImg Output image. Its content depends on the flags value defining what is drawn in the\noutput image. See possible flags bit values below.\n@param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)\n, the color is generated randomly.\n@param singlePointColor Color of single keypoints (circles), that is, keypoints that do not\nhave matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.\n@param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are\ndrawn.\n@param flags Flags setting drawing features. Possible flags bit values are defined by\nDrawMatchesFlags.\n\nThis function draws matches of keypoints from two images in the output image. Match is a line\nconnecting two keypoints (circles). See cv::DrawMatchesFlags.\n */\nCV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,\n                             InputArray img2, const std::vector<KeyPoint>& keypoints2,\n                             const std::vector<DMatch>& matches1to2, InputOutputArray outImg,\n                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),\n                             const std::vector<char>& matchesMask=std::vector<char>(), int flags=DrawMatchesFlags::DEFAULT );\n\n/** @overload */\nCV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,\n                             InputArray img2, const std::vector<KeyPoint>& keypoints2,\n                             const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,\n                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),\n                             const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );\n\n//! 
@} features2d_draw\n\n/****************************************************************************************\\\n*   Functions to evaluate the feature detectors and [generic] descriptor extractors      *\n\\****************************************************************************************/\n\nCV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,\n                                         std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,\n                                         float& repeatability, int& correspCount,\n                                         const Ptr<FeatureDetector>& fdetector=Ptr<FeatureDetector>() );\n\nCV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatch> >& matches1to2,\n                                             const std::vector<std::vector<uchar> >& correctMatches1to2Mask,\n                                             std::vector<Point2f>& recallPrecisionCurve );\n\nCV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );\nCV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );\n\n/****************************************************************************************\\\n*                                     Bag of visual words                                *\n\\****************************************************************************************/\n\n//! @addtogroup features2d_category\n//! @{\n\n/** @brief Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors.\n\nFor details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,\nChristopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :\n */\nclass CV_EXPORTS_W BOWTrainer\n{\npublic:\n    BOWTrainer();\n    virtual ~BOWTrainer();\n\n    /** @brief Adds descriptors to a training set.\n\n    @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a\n    descriptor.\n\n    The training set is clustered using the cluster method to construct the vocabulary.\n     */\n    CV_WRAP void add( const Mat& descriptors );\n\n    /** @brief Returns a training set of descriptors.\n    */\n    CV_WRAP const std::vector<Mat>& getDescriptors() const;\n\n    /** @brief Returns the count of all descriptors stored in the training set.\n    */\n    CV_WRAP int descriptorsCount() const;\n\n    CV_WRAP virtual void clear();\n\n    /** @overload */\n    CV_WRAP virtual Mat cluster() const = 0;\n\n    /** @brief Clusters train descriptors.\n\n    @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.\n    Descriptors are not added to the inner train descriptor set.\n\n    The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first\n    variant of the method, train descriptors stored in the object are clustered. In the second variant,\n    input descriptors are clustered.\n     */\n    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;\n\nprotected:\n    std::vector<Mat> descriptors;\n    int size;\n};\n\n/** @brief kmeans-based class to train a visual vocabulary using the *bag of visual words* approach. 
:\n */\nclass CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer\n{\npublic:\n    /** @brief The constructor.\n\n    @see cv::kmeans\n    */\n    CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),\n                      int attempts=3, int flags=KMEANS_PP_CENTERS );\n    virtual ~BOWKMeansTrainer();\n\n    // Returns trained vocabulary (i.e. cluster centers).\n    CV_WRAP virtual Mat cluster() const;\n    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const;\n\nprotected:\n\n    int clusterCount;\n    TermCriteria termcrit;\n    int attempts;\n    int flags;\n};\n\n/** @brief Class to compute an image descriptor using the *bag of visual words*.\n\nSuch a computation consists of the following steps:\n\n1.  Compute descriptors for a given image and its keypoints set.\n2.  Find the nearest visual words from the vocabulary for each keypoint descriptor.\n3.  Compute the bag-of-words image descriptor as a normalized histogram of vocabulary words\nencountered in the image. The i-th bin of the histogram is the frequency of the i-th word of the\nvocabulary in the given image.\n */\nclass CV_EXPORTS_W BOWImgDescriptorExtractor\n{\npublic:\n    /** @brief The constructor.\n\n    @param dextractor Descriptor extractor that is used to compute descriptors for an input image and\n    its keypoints.\n    @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary\n    for each keypoint descriptor of the image.\n     */\n    CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,\n                               const Ptr<DescriptorMatcher>& dmatcher );\n    /** @overload */\n    BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& dmatcher );\n    virtual ~BOWImgDescriptorExtractor();\n\n    /** @brief Sets a visual vocabulary.\n\n    @param vocabulary Vocabulary (can be trained using the inheritor of BOWTrainer ). Each row of the\n    vocabulary is a visual word (cluster center).\n     */\n    CV_WRAP void setVocabulary( const Mat& vocabulary );\n\n    /** @brief Returns the set vocabulary.\n    */\n    CV_WRAP const Mat& getVocabulary() const;\n\n    /** @brief Computes an image descriptor using the set visual vocabulary.\n\n    @param image Image, for which the descriptor is computed.\n    @param keypoints Keypoints detected in the input image.\n    @param imgDescriptor Computed output image descriptor.\n    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that\n    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)\n    returned if it is non-zero.\n    @param descriptors Descriptors of the image keypoints that are returned if they are non-zero.\n     */\n    void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,\n                  std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );\n    /** @overload\n    @param keypointDescriptors Computed descriptors to match with vocabulary.\n    @param imgDescriptor Computed output image descriptor.\n    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. 
This means that\n    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)\n    returned if it is non-zero.\n    */\n    void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,\n                  std::vector<std::vector<int> >* pointIdxsOfClusters=0 );\n    // compute() is not constant because DescriptorMatcher::match is not constant\n\n    CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )\n    { compute(image,keypoints,imgDescriptor); }\n\n    /** @brief Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0.\n    */\n    CV_WRAP int descriptorSize() const;\n\n    /** @brief Returns an image descriptor type.\n     */\n    CV_WRAP int descriptorType() const;\n\nprotected:\n    Mat vocabulary;\n    Ptr<DescriptorExtractor> dextractor;\n    Ptr<DescriptorMatcher> dmatcher;\n};\n\n//! @} features2d_category\n\n//! @} features2d\n\n} /* namespace cv */\n\n#endif\n"
  },
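A minimal usage sketch for the FAST and AGAST factories declared in the features2d header above (illustrative only, not part of the original tree; the image path is a placeholder):

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

int main()
{
    // Placeholder input; any 8-bit grayscale image works.
    cv::Mat gray = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
    if (gray.empty()) return 1;

    // FAST with the default 9/16 mask and non-maximum suppression.
    cv::Ptr<cv::FastFeatureDetector> fast =
        cv::FastFeatureDetector::create(10, true, cv::FastFeatureDetector::TYPE_9_16);
    std::vector<cv::KeyPoint> fastKp;
    fast->detect(gray, fastKp);

    // AGAST with the OAST 9/16 neighborhood (the default).
    cv::Ptr<cv::AgastFeatureDetector> agast =
        cv::AgastFeatureDetector::create(10, true, cv::AgastFeatureDetector::OAST_9_16);
    std::vector<cv::KeyPoint> agastKp;
    agast->detect(gray, agastKp);

    return 0;
}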
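SimpleBlobDetector is configured entirely through its Params struct; min bounds are inclusive and max bounds exclusive, as the docblock above notes. A hypothetical configuration for dark, roughly circular blobs (the function name and threshold values are made up for illustration):

#include <opencv2/features2d.hpp>

cv::Ptr<cv::SimpleBlobDetector> makeDarkBlobDetector()
{
    cv::SimpleBlobDetector::Params p;                  // starts from the tuned defaults
    p.filterByColor       = true;  p.blobColor = 0;    // keep dark blobs only
    p.filterByArea        = true;  p.minArea = 50.f;  p.maxArea = 5000.f;
    p.filterByCircularity = true;  p.minCircularity = 0.7f;
    return cv::SimpleBlobDetector::create(p);
}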
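Putting the detector, matcher and drawing pieces together: AKAZE produces binary (MLDB) descriptors by default, so the BFMatcher notes above call for NORM_HAMMING, and crossCheck=true keeps only mutually nearest pairs. A sketch with placeholder image paths:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

int main()
{
    cv::Mat img1 = cv::imread("img1.png", cv::IMREAD_GRAYSCALE); // placeholder
    cv::Mat img2 = cv::imread("img2.png", cv::IMREAD_GRAYSCALE); // placeholder
    if (img1.empty() || img2.empty()) return 1;

    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();   // DESCRIPTOR_MLDB by default
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;
    akaze->detectAndCompute(img1, cv::noArray(), kp1, desc1);
    akaze->detectAndCompute(img2, cv::noArray(), kp2, desc2);

    // Hamming norm for binary descriptors; crossCheck as the ratio-test alternative.
    cv::BFMatcher matcher(cv::NORM_HAMMING, true);
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);

    cv::Mat vis;
    cv::drawMatches(img1, kp1, img2, kp2, matches, vis);
    return 0;
}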
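FlannBasedMatcher builds its flann::Index when train() runs (the public match methods call it implicitly), and knnMatch() with k=2 supports the usual Lowe ratio test. A sketch for float descriptors such as KAZE's; the helper name and the 0.7 ratio are illustrative choices, not part of the API:

#include <opencv2/features2d.hpp>
#include <vector>

std::vector<cv::DMatch> ratioMatch(const cv::Mat& queryDesc, const cv::Mat& trainDesc)
{
    cv::FlannBasedMatcher matcher;                    // KD-tree index by default
    matcher.add(std::vector<cv::Mat>(1, trainDesc));  // goes into trainDescCollection
    matcher.train();                                  // builds the flann::Index

    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(queryDesc, knn, 2);              // 2 nearest neighbours per query

    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knn.size(); ++i)
        if (knn[i].size() == 2 && knn[i][0].distance < 0.7f * knn[i][1].distance)
            good.push_back(knn[i][0]);
    return good;
}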
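The bag-of-visual-words flow documented above, end to end: cluster training descriptors into a vocabulary with BOWKMeansTrainer, then map an image to a normalized word histogram with BOWImgDescriptorExtractor. KAZE is used here because the default FLANN matcher needs float descriptors; the 100-word vocabulary and the helper name are arbitrary assumptions:

#include <opencv2/features2d.hpp>
#include <vector>

cv::Mat bowDescriptor(const cv::Mat& image, const std::vector<cv::Mat>& trainDescriptors)
{
    cv::BOWKMeansTrainer trainer(100);                // vocabulary size is arbitrary
    for (size_t i = 0; i < trainDescriptors.size(); ++i)
        trainer.add(trainDescriptors[i]);
    cv::Mat vocabulary = trainer.cluster();           // kmeans cluster centers

    cv::Ptr<cv::Feature2D> kaze = cv::KAZE::create(); // float descriptors
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
    cv::BOWImgDescriptorExtractor bow(kaze, matcher);
    bow.setVocabulary(vocabulary);

    std::vector<cv::KeyPoint> keypoints;
    kaze->detect(image, keypoints);
    cv::Mat imgDescriptor;                            // 1 x 100 normalized histogram
    bow.compute(image, keypoints, imgDescriptor);
    return imgDescriptor;
}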
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/all_indices.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n\n#ifndef OPENCV_FLANN_ALL_INDICES_H_\n#define OPENCV_FLANN_ALL_INDICES_H_\n\n#include \"general.h\"\n\n#include \"nn_index.h\"\n#include \"kdtree_index.h\"\n#include \"kdtree_single_index.h\"\n#include \"kmeans_index.h\"\n#include \"composite_index.h\"\n#include \"linear_index.h\"\n#include \"hierarchical_clustering_index.h\"\n#include \"lsh_index.h\"\n#include \"autotuned_index.h\"\n\n\nnamespace cvflann\n{\n\ntemplate<typename KDTreeCapability, typename VectorSpace, typename Distance>\nstruct index_creator\n{\n    static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)\n    {\n        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, \"algorithm\");\n\n        NNIndex<Distance>* nnIndex;\n        switch (index_type) {\n        case FLANN_INDEX_LINEAR:\n            nnIndex = new LinearIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_KDTREE_SINGLE:\n            nnIndex = new KDTreeSingleIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_KDTREE:\n            nnIndex = new KDTreeIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_KMEANS:\n            nnIndex = new KMeansIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_COMPOSITE:\n            nnIndex = new CompositeIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_AUTOTUNED:\n            nnIndex = new AutotunedIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_HIERARCHICAL:\n            nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_LSH:\n            nnIndex = new LshIndex<Distance>(dataset, params, distance);\n  
          break;\n        default:\n            throw FLANNException(\"Unknown index type\");\n        }\n\n        return nnIndex;\n    }\n};\n\ntemplate<typename VectorSpace, typename Distance>\nstruct index_creator<False,VectorSpace,Distance>\n{\n    static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)\n    {\n        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, \"algorithm\");\n\n        NNIndex<Distance>* nnIndex;\n        switch (index_type) {\n        case FLANN_INDEX_LINEAR:\n            nnIndex = new LinearIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_KMEANS:\n            nnIndex = new KMeansIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_HIERARCHICAL:\n            nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_LSH:\n            nnIndex = new LshIndex<Distance>(dataset, params, distance);\n            break;\n        default:\n            throw FLANNException(\"Unknown index type\");\n        }\n\n        return nnIndex;\n    }\n};\n\ntemplate<typename Distance>\nstruct index_creator<False,False,Distance>\n{\n    static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)\n    {\n        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, \"algorithm\");\n\n        NNIndex<Distance>* nnIndex;\n        switch (index_type) {\n        case FLANN_INDEX_LINEAR:\n            nnIndex = new LinearIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_HIERARCHICAL:\n            nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);\n            break;\n        case FLANN_INDEX_LSH:\n            nnIndex = new LshIndex<Distance>(dataset, params, distance);\n            break;\n        default:\n            throw FLANNException(\"Unknown index type\");\n        }\n\n        return nnIndex;\n    }\n};\n\ntemplate<typename Distance>\nNNIndex<Distance>* create_index_by_type(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)\n{\n    return index_creator<typename Distance::is_kdtree_distance,\n                         typename Distance::is_vector_space_distance,\n                         Distance>::create(dataset, params,distance);\n}\n\n}\n\n#endif /* OPENCV_FLANN_ALL_INDICES_H_ */\n"
  },
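create_index_by_type() above is the dispatch that ultimately runs when a cv::flann::Index is constructed: the "algorithm" entry in IndexParams selects the concrete NNIndex subclass. A small sketch over random data (dimensions and parameter values are arbitrary):

#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

int main()
{
    cv::Mat features(1000, 32, CV_32F), queries(5, 32, CV_32F);
    cv::randu(features, 0.f, 1.f);
    cv::randu(queries, 0.f, 1.f);

    // KDTreeIndexParams sets "algorithm" = FLANN_INDEX_KDTREE, so the
    // index_creator above instantiates KDTreeIndex<Distance>.
    cv::flann::Index index(features, cv::flann::KDTreeIndexParams(4));

    cv::Mat indices, dists;
    index.knnSearch(queries, indices, dists, 3, cv::flann::SearchParams(32));
    return 0;
}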
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/allocator.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_ALLOCATOR_H_\n#define OPENCV_FLANN_ALLOCATOR_H_\n\n#include <stdlib.h>\n#include <stdio.h>\n\n\nnamespace cvflann\n{\n\n/**\n * Allocates (using C's malloc) a generic type T.\n *\n * Params:\n *     count = number of instances to allocate.\n * Returns: pointer (of type T*) to memory buffer\n */\ntemplate <typename T>\nT* allocate(size_t count = 1)\n{\n    T* mem = (T*) ::malloc(sizeof(T)*count);\n    return mem;\n}\n\n\n/**\n * Pooled storage allocator\n *\n * The following routines allow for the efficient allocation of storage in\n * small chunks from a specified pool.  Rather than allowing each structure\n * to be freed individually, an entire pool of storage is freed at once.\n * This method has two advantages over just using malloc() and free().  First,\n * it is far more efficient for allocating small objects, as there is\n * no overhead for remembering all the information needed to free each\n * object or consolidating fragmented memory.  Second, the decision about\n * how long to keep an object is made at the time of allocation, and there\n * is no need to track down all the objects to free them.\n *\n */\n\nconst size_t     WORDSIZE=16;\nconst  size_t     BLOCKSIZE=8192;\n\nclass PooledAllocator\n{\n    /* We maintain memory alignment to word boundaries by requiring that all\n        allocations be in multiples of the machine wordsize.  */\n    /* Size of machine word in bytes.  Must be power of 2. */\n    /* Minimum number of bytes requested at a time from\tthe system.  Must be multiple of WORDSIZE. */\n\n\n    int     remaining;  /* Number of bytes left in current block of storage. */\n    void*   base;     /* Pointer to base of current block of storage. */\n    void*   loc;      /* Current location in block to next allocate memory. 
*/\n    int     blocksize;\n\n\npublic:\n    int     usedMemory;\n    int     wastedMemory;\n\n    /**\n        Default constructor. Initializes a new pool.\n     */\n    PooledAllocator(int blockSize = BLOCKSIZE)\n    {\n        blocksize = blockSize;\n        remaining = 0;\n        base = NULL;\n\n        usedMemory = 0;\n        wastedMemory = 0;\n    }\n\n    /**\n     * Destructor. Frees all the memory allocated in this pool.\n     */\n    ~PooledAllocator()\n    {\n        void* prev;\n\n        while (base != NULL) {\n            prev = *((void**) base); /* Get pointer to prev block. */\n            ::free(base);\n            base = prev;\n        }\n    }\n\n    /**\n     * Returns a pointer to a piece of new memory of the given size in bytes\n     * allocated from the pool.\n     */\n    void* allocateMemory(int size)\n    {\n        int blockSize;\n\n        /* Round size up to a multiple of wordsize.  The following expression\n            only works for WORDSIZE that is a power of 2, by masking last bits of\n            incremented size to zero.\n         */\n        size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);\n\n        /* Check whether a new block must be allocated.  Note that the first word\n            of a block is reserved for a pointer to the previous block.\n         */\n        if (size > remaining) {\n\n            wastedMemory += remaining;\n\n            /* Allocate new storage. */\n            blockSize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ?\n                        size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE;\n\n            // use the standard C malloc to allocate memory\n            void* m = ::malloc(blockSize);\n            if (!m) {\n                fprintf(stderr,\"Failed to allocate memory.\\n\");\n                return NULL;\n            }\n\n            /* Fill first word of new block with pointer to previous block. */\n            ((void**) m)[0] = base;\n            base = m;\n\n            int shift = 0;\n            //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);\n\n            remaining = blockSize - sizeof(void*) - shift;\n            loc = ((char*)m + sizeof(void*) + shift);\n        }\n        void* rloc = loc;\n        loc = (char*)loc + size;\n        remaining -= size;\n\n        usedMemory += size;\n\n        return rloc;\n    }\n\n    /**\n     * Allocates (using this pool) a generic type T.\n     *\n     * Params:\n     *     count = number of instances to allocate.\n     * Returns: pointer (of type T*) to memory buffer\n     */\n    template <typename T>\n    T* allocate(size_t count = 1)\n    {\n        T* mem = (T*) this->allocateMemory((int)(sizeof(T)*count));\n        return mem;\n    }\n\n};\n\n}\n\n#endif //OPENCV_FLANN_ALLOCATOR_H_\n"
  },
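The trade-off described in the PooledAllocator header comment above — no per-object free, everything released when the pool is destroyed — looks like this in use (the function name and sizes are illustrative):

#include <opencv2/flann/allocator.h>

void buildScratchTables()
{
    cvflann::PooledAllocator pool;               // carves 8 KiB blocks by default

    int*    counts = pool.allocate<int>(256);    // uninitialized, word-aligned memory
    double* sums   = pool.allocate<double>(256);
    for (int i = 0; i < 256; ++i) { counts[i] = 0; sums[i] = 0.0; }

    // ... fill and use the tables; there is no way (and no need) to free them one by one ...
}   // ~PooledAllocator walks the block list and frees every block here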
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/any.h",
    "content": "#ifndef OPENCV_FLANN_ANY_H_\n#define OPENCV_FLANN_ANY_H_\n/*\n * (C) Copyright Christopher Diggins 2005-2011\n * (C) Copyright Pablo Aguilar 2005\n * (C) Copyright Kevlin Henney 2001\n *\n * Distributed under the Boost Software License, Version 1.0. (See\n * accompanying file LICENSE_1_0.txt or copy at\n * http://www.boost.org/LICENSE_1_0.txt\n *\n * Adapted for FLANN by Marius Muja\n */\n\n#include \"defines.h\"\n#include <stdexcept>\n#include <ostream>\n#include <typeinfo>\n\nnamespace cvflann\n{\n\nnamespace anyimpl\n{\n\nstruct bad_any_cast\n{\n};\n\nstruct empty_any\n{\n};\n\ninline std::ostream& operator <<(std::ostream& out, const empty_any&)\n{\n    out << \"[empty_any]\";\n    return out;\n}\n\nstruct base_any_policy\n{\n    virtual void static_delete(void** x) = 0;\n    virtual void copy_from_value(void const* src, void** dest) = 0;\n    virtual void clone(void* const* src, void** dest) = 0;\n    virtual void move(void* const* src, void** dest) = 0;\n    virtual void* get_value(void** src) = 0;\n    virtual const void* get_value(void* const * src) = 0;\n    virtual ::size_t get_size() = 0;\n    virtual const std::type_info& type() = 0;\n    virtual void print(std::ostream& out, void* const* src) = 0;\n    virtual ~base_any_policy() {}\n};\n\ntemplate<typename T>\nstruct typed_base_any_policy : base_any_policy\n{\n    virtual ::size_t get_size() { return sizeof(T); }\n    virtual const std::type_info& type() { return typeid(T); }\n\n};\n\ntemplate<typename T>\nstruct small_any_policy : typed_base_any_policy<T>\n{\n    virtual void static_delete(void**) { }\n    virtual void copy_from_value(void const* src, void** dest)\n    {\n        new (dest) T(* reinterpret_cast<T const*>(src));\n    }\n    virtual void clone(void* const* src, void** dest) { *dest = *src; }\n    virtual void move(void* const* src, void** dest) { *dest = *src; }\n    virtual void* get_value(void** src) { return reinterpret_cast<void*>(src); }\n    virtual const void* get_value(void* const * src) { return reinterpret_cast<const void*>(src); }\n    virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(src); }\n};\n\ntemplate<typename T>\nstruct big_any_policy : typed_base_any_policy<T>\n{\n    virtual void static_delete(void** x)\n    {\n        if (* x) delete (* reinterpret_cast<T**>(x)); *x = NULL;\n    }\n    virtual void copy_from_value(void const* src, void** dest)\n    {\n        *dest = new T(*reinterpret_cast<T const*>(src));\n    }\n    virtual void clone(void* const* src, void** dest)\n    {\n        *dest = new T(**reinterpret_cast<T* const*>(src));\n    }\n    virtual void move(void* const* src, void** dest)\n    {\n        (*reinterpret_cast<T**>(dest))->~T();\n        **reinterpret_cast<T**>(dest) = **reinterpret_cast<T* const*>(src);\n    }\n    virtual void* get_value(void** src) { return *src; }\n    virtual const void* get_value(void* const * src) { return *src; }\n    virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(*src); }\n};\n\ntemplate<> inline void big_any_policy<flann_centers_init_t>::print(std::ostream& out, void* const* src)\n{\n    out << int(*reinterpret_cast<flann_centers_init_t const*>(*src));\n}\n\ntemplate<> inline void big_any_policy<flann_algorithm_t>::print(std::ostream& out, void* const* src)\n{\n    out << int(*reinterpret_cast<flann_algorithm_t const*>(*src));\n}\n\ntemplate<> inline void big_any_policy<cv::String>::print(std::ostream& out, void* const* src)\n{\n    out << 
(*reinterpret_cast<cv::String const*>(*src)).c_str();\n}\n\ntemplate<typename T>\nstruct choose_policy\n{\n    typedef big_any_policy<T> type;\n};\n\ntemplate<typename T>\nstruct choose_policy<T*>\n{\n    typedef small_any_policy<T*> type;\n};\n\nstruct any;\n\n/// Choosing the policy for an any type is illegal, but should never happen.\n/// This is designed to throw a compiler error.\ntemplate<>\nstruct choose_policy<any>\n{\n    typedef void type;\n};\n\n/// Specializations for small types.\n#define SMALL_POLICY(TYPE) \\\n    template<> \\\n    struct choose_policy<TYPE> { typedef small_any_policy<TYPE> type; \\\n    }\n\nSMALL_POLICY(signed char);\nSMALL_POLICY(unsigned char);\nSMALL_POLICY(signed short);\nSMALL_POLICY(unsigned short);\nSMALL_POLICY(signed int);\nSMALL_POLICY(unsigned int);\nSMALL_POLICY(signed long);\nSMALL_POLICY(unsigned long);\nSMALL_POLICY(float);\nSMALL_POLICY(bool);\n\n#undef SMALL_POLICY\n\ntemplate <typename T>\nclass SinglePolicy\n{\n    SinglePolicy();\n    SinglePolicy(const SinglePolicy& other);\n    SinglePolicy& operator=(const SinglePolicy& other);\n\npublic:\n    static base_any_policy* get_policy();\n\nprivate:\n    static typename choose_policy<T>::type policy;\n};\n\ntemplate <typename T>\ntypename choose_policy<T>::type SinglePolicy<T>::policy;\n\n/// This function will return a different policy for each type.\ntemplate <typename T>\ninline base_any_policy* SinglePolicy<T>::get_policy() { return &policy; }\n\n} // namespace anyimpl\n\nstruct any\n{\nprivate:\n    // fields\n    anyimpl::base_any_policy* policy;\n    void* object;\n\npublic:\n    /// Initializing constructor.\n    template <typename T>\n    any(const T& x)\n        : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)\n    {\n        assign(x);\n    }\n\n    /// Empty constructor.\n    any()\n        : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)\n    { }\n\n    /// Special initializing constructor for string literals.\n    any(const char* x)\n        : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)\n    {\n        assign(x);\n    }\n\n    /// Copy constructor.\n    any(const any& x)\n        : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)\n    {\n        assign(x);\n    }\n\n    /// Destructor.\n    ~any()\n    {\n        policy->static_delete(&object);\n    }\n\n    /// Assignment function from another any.\n    any& assign(const any& x)\n    {\n        reset();\n        policy = x.policy;\n        policy->clone(&x.object, &object);\n        return *this;\n    }\n\n    /// Assignment function.\n    template <typename T>\n    any& assign(const T& x)\n    {\n        reset();\n        policy = anyimpl::SinglePolicy<T>::get_policy();\n        policy->copy_from_value(&x, &object);\n        return *this;\n    }\n\n    /// Assignment operator.\n    template<typename T>\n    any& operator=(const T& x)\n    {\n        return assign(x);\n    }\n\n    /// Assignment operator, specialed for literal strings.\n    /// They have types like const char [6] which don't work as expected.\n    any& operator=(const char* x)\n    {\n        return assign(x);\n    }\n\n    /// Utility functions\n    any& swap(any& x)\n    {\n        std::swap(policy, x.policy);\n        std::swap(object, x.object);\n        return *this;\n    }\n\n    /// Cast operator. 
You can only cast to the original type.\n    template<typename T>\n    T& cast()\n    {\n        if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast();\n        T* r = reinterpret_cast<T*>(policy->get_value(&object));\n        return *r;\n    }\n\n    /// Cast operator. You can only cast to the original type.\n    template<typename T>\n    const T& cast() const\n    {\n        if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast();\n        const T* r = reinterpret_cast<const T*>(policy->get_value(&object));\n        return *r;\n    }\n\n    /// Returns true if the any contains no value.\n    bool empty() const\n    {\n        return policy->type() == typeid(anyimpl::empty_any);\n    }\n\n    /// Frees any allocated memory, and sets the value to NULL.\n    void reset()\n    {\n        policy->static_delete(&object);\n        policy = anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy();\n    }\n\n    /// Returns true if the two types are the same.\n    bool compatible(const any& x) const\n    {\n        return policy->type() == x.policy->type();\n    }\n\n    /// Returns if the type is compatible with the policy\n    template<typename T>\n    bool has_type()\n    {\n        return policy->type() == typeid(T);\n    }\n\n    const std::type_info& type() const\n    {\n        return policy->type();\n    }\n\n    friend std::ostream& operator <<(std::ostream& out, const any& any_val);\n};\n\ninline std::ostream& operator <<(std::ostream& out, const any& any_val)\n{\n    any_val.policy->print(out,&any_val.object);\n    return out;\n}\n\n}\n\n#endif // OPENCV_FLANN_ANY_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/autotuned_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_\n#define OPENCV_FLANN_AUTOTUNED_INDEX_H_\n\n#include \"general.h\"\n#include \"nn_index.h\"\n#include \"ground_truth.h\"\n#include \"index_testing.h\"\n#include \"sampling.h\"\n#include \"kdtree_index.h\"\n#include \"kdtree_single_index.h\"\n#include \"kmeans_index.h\"\n#include \"composite_index.h\"\n#include \"linear_index.h\"\n#include \"logger.h\"\n\nnamespace cvflann\n{\n\ntemplate<typename Distance>\nNNIndex<Distance>* create_index_by_type(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance);\n\n\nstruct AutotunedIndexParams : public IndexParams\n{\n    AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, float memory_weight = 0, float sample_fraction = 0.1)\n    {\n        (*this)[\"algorithm\"] = FLANN_INDEX_AUTOTUNED;\n        // precision desired (used for autotuning, -1 otherwise)\n        (*this)[\"target_precision\"] = target_precision;\n        // build tree time weighting factor\n        (*this)[\"build_weight\"] = build_weight;\n        // index memory weighting factor\n        (*this)[\"memory_weight\"] = memory_weight;\n        // what fraction of the dataset to use for autotuning\n        (*this)[\"sample_fraction\"] = sample_fraction;\n    }\n};\n\n\ntemplate <typename Distance>\nclass AutotunedIndex : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n    AutotunedIndex(const Matrix<ElementType>& inputData, const IndexParams& params = AutotunedIndexParams(), Distance d = Distance()) :\n        dataset_(inputData), distance_(d)\n    {\n        target_precision_ = get_param(params, \"target_precision\",0.8f);\n        build_weight_ =  get_param(params,\"build_weight\", 0.01f);\n        memory_weight_ = 
get_param(params, \"memory_weight\", 0.0f);\n        sample_fraction_ = get_param(params,\"sample_fraction\", 0.1f);\n        bestIndex_ = NULL;\n    }\n\n    AutotunedIndex(const AutotunedIndex&);\n    AutotunedIndex& operator=(const AutotunedIndex&);\n\n    virtual ~AutotunedIndex()\n    {\n        if (bestIndex_ != NULL) {\n            delete bestIndex_;\n            bestIndex_ = NULL;\n        }\n    }\n\n    /**\n     *          Method responsible with building the index.\n     */\n    virtual void buildIndex()\n    {\n        std::ostringstream stream;\n        bestParams_ = estimateBuildParams();\n        print_params(bestParams_, stream);\n        Logger::info(\"----------------------------------------------------\\n\");\n        Logger::info(\"Autotuned parameters:\\n\");\n        Logger::info(\"%s\", stream.str().c_str());\n        Logger::info(\"----------------------------------------------------\\n\");\n\n        bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_);\n        bestIndex_->buildIndex();\n        speedup_ = estimateSearchParams(bestSearchParams_);\n        stream.str(std::string());\n        print_params(bestSearchParams_, stream);\n        Logger::info(\"----------------------------------------------------\\n\");\n        Logger::info(\"Search parameters:\\n\");\n        Logger::info(\"%s\", stream.str().c_str());\n        Logger::info(\"----------------------------------------------------\\n\");\n    }\n\n    /**\n     *  Saves the index to a stream\n     */\n    virtual void saveIndex(FILE* stream)\n    {\n        save_value(stream, (int)bestIndex_->getType());\n        bestIndex_->saveIndex(stream);\n        save_value(stream, get_param<int>(bestSearchParams_, \"checks\"));\n    }\n\n    /**\n     *  Loads the index from a stream\n     */\n    virtual void loadIndex(FILE* stream)\n    {\n        int index_type;\n\n        load_value(stream, index_type);\n        IndexParams params;\n        params[\"algorithm\"] = (flann_algorithm_t)index_type;\n        bestIndex_ = create_index_by_type<Distance>(dataset_, params, distance_);\n        bestIndex_->loadIndex(stream);\n        int checks;\n        load_value(stream, checks);\n        bestSearchParams_[\"checks\"] = checks;\n    }\n\n    /**\n     *      Method that searches for nearest-neighbors\n     */\n    virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)\n    {\n        int checks = get_param<int>(searchParams,\"checks\",FLANN_CHECKS_AUTOTUNED);\n        if (checks == FLANN_CHECKS_AUTOTUNED) {\n            bestIndex_->findNeighbors(result, vec, bestSearchParams_);\n        }\n        else {\n            bestIndex_->findNeighbors(result, vec, searchParams);\n        }\n    }\n\n\n    IndexParams getParameters() const\n    {\n        return bestIndex_->getParameters();\n    }\n\n    SearchParams getSearchParameters() const\n    {\n        return bestSearchParams_;\n    }\n\n    float getSpeedup() const\n    {\n        return speedup_;\n    }\n\n\n    /**\n     *      Number of features in this index.\n     */\n    virtual size_t size() const\n    {\n        return bestIndex_->size();\n    }\n\n    /**\n     *  The length of each vector in this index.\n     */\n    virtual size_t veclen() const\n    {\n        return bestIndex_->veclen();\n    }\n\n    /**\n     * The amount of memory (in bytes) this index uses.\n     */\n    virtual int usedMemory() const\n    {\n        return bestIndex_->usedMemory();\n    }\n\n    /**\n     * 
Algorithm name\n     */\n    virtual flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_AUTOTUNED;\n    }\n\nprivate:\n\n    struct CostData\n    {\n        float searchTimeCost;\n        float buildTimeCost;\n        float memoryCost;\n        float totalCost;\n        IndexParams params;\n    };\n\n    void evaluate_kmeans(CostData& cost)\n    {\n        StartStopTimer t;\n        int checks;\n        const int nn = 1;\n\n        Logger::info(\"KMeansTree using params: max_iterations=%d, branching=%d\\n\",\n                     get_param<int>(cost.params,\"iterations\"),\n                     get_param<int>(cost.params,\"branching\"));\n        KMeansIndex<Distance> kmeans(sampledDataset_, cost.params, distance_);\n        // measure index build time\n        t.start();\n        kmeans.buildIndex();\n        t.stop();\n        float buildTime = (float)t.value;\n\n        // measure search time\n        float searchTime = test_index_precision(kmeans, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn);\n\n        float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float));\n        cost.memoryCost = (kmeans.usedMemory() + datasetMemory) / datasetMemory;\n        cost.searchTimeCost = searchTime;\n        cost.buildTimeCost = buildTime;\n        Logger::info(\"KMeansTree buildTime=%g, searchTime=%g, build_weight=%g\\n\", buildTime, searchTime, build_weight_);\n    }\n\n\n    void evaluate_kdtree(CostData& cost)\n    {\n        StartStopTimer t;\n        int checks;\n        const int nn = 1;\n\n        Logger::info(\"KDTree using params: trees=%d\\n\", get_param<int>(cost.params,\"trees\"));\n        KDTreeIndex<Distance> kdtree(sampledDataset_, cost.params, distance_);\n\n        t.start();\n        kdtree.buildIndex();\n        t.stop();\n        float buildTime = (float)t.value;\n\n        //measure search time\n        float searchTime = test_index_precision(kdtree, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn);\n\n        float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float));\n        cost.memoryCost = (kdtree.usedMemory() + datasetMemory) / datasetMemory;\n        cost.searchTimeCost = searchTime;\n        cost.buildTimeCost = buildTime;\n        Logger::info(\"KDTree buildTime=%g, searchTime=%g\\n\", buildTime, searchTime);\n    }\n\n\n    //    struct KMeansSimpleDownhillFunctor {\n    //\n    //        Autotune& autotuner;\n    //        KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}\n    //\n    //        float operator()(int* params) {\n    //\n    //            float maxFloat = numeric_limits<float>::max();\n    //\n    //            if (params[0]<2) return maxFloat;\n    //            if (params[1]<0) return maxFloat;\n    //\n    //            CostData c;\n    //            c.params[\"algorithm\"] = KMEANS;\n    //            c.params[\"centers-init\"] = CENTERS_RANDOM;\n    //            c.params[\"branching\"] = params[0];\n    //            c.params[\"max-iterations\"] = params[1];\n    //\n    //            autotuner.evaluate_kmeans(c);\n    //\n    //            return c.timeCost;\n    //\n    //        }\n    //    };\n    //\n    //    struct KDTreeSimpleDownhillFunctor {\n    //\n    //        Autotune& autotuner;\n    //        KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}\n    //\n    //        float operator()(int* params) {\n    //            float 
maxFloat = numeric_limits<float>::max();\n    //\n    //            if (params[0]<1) return maxFloat;\n    //\n    //            CostData c;\n    //            c.params[\"algorithm\"] = KDTREE;\n    //            c.params[\"trees\"] = params[0];\n    //\n    //            autotuner.evaluate_kdtree(c);\n    //\n    //            return c.timeCost;\n    //\n    //        }\n    //    };\n\n\n\n    void optimizeKMeans(std::vector<CostData>& costs)\n    {\n        Logger::info(\"KMEANS, Step 1: Exploring parameter space\\n\");\n\n        // explore kmeans parameters space using combinations of the parameters below\n        int maxIterations[] = { 1, 5, 10, 15 };\n        int branchingFactors[] = { 16, 32, 64, 128, 256 };\n\n        int kmeansParamSpaceSize = FLANN_ARRAY_LEN(maxIterations) * FLANN_ARRAY_LEN(branchingFactors);\n        costs.reserve(costs.size() + kmeansParamSpaceSize);\n\n        // evaluate kmeans for all parameter combinations\n        for (size_t i = 0; i < FLANN_ARRAY_LEN(maxIterations); ++i) {\n            for (size_t j = 0; j < FLANN_ARRAY_LEN(branchingFactors); ++j) {\n                CostData cost;\n                cost.params[\"algorithm\"] = FLANN_INDEX_KMEANS;\n                cost.params[\"centers_init\"] = FLANN_CENTERS_RANDOM;\n                cost.params[\"iterations\"] = maxIterations[i];\n                cost.params[\"branching\"] = branchingFactors[j];\n\n                evaluate_kmeans(cost);\n                costs.push_back(cost);\n            }\n        }\n\n        //         Logger::info(\"KMEANS, Step 2: simplex-downhill optimization\\n\");\n        //\n        //         const int n = 2;\n        //         // choose initial simplex points as the best parameters so far\n        //         int kmeansNMPoints[n*(n+1)];\n        //         float kmeansVals[n+1];\n        //         for (int i=0;i<n+1;++i) {\n        //             kmeansNMPoints[i*n] = (int)kmeansCosts[i].params[\"branching\"];\n        //             kmeansNMPoints[i*n+1] = (int)kmeansCosts[i].params[\"max-iterations\"];\n        //             kmeansVals[i] = kmeansCosts[i].timeCost;\n        //         }\n        //         KMeansSimpleDownhillFunctor kmeans_cost_func(*this);\n        //         // run optimization\n        //         optimizeSimplexDownhill(kmeansNMPoints,n,kmeans_cost_func,kmeansVals);\n        //         // store results\n        //         for (int i=0;i<n+1;++i) {\n        //             kmeansCosts[i].params[\"branching\"] = kmeansNMPoints[i*2];\n        //             kmeansCosts[i].params[\"max-iterations\"] = kmeansNMPoints[i*2+1];\n        //             kmeansCosts[i].timeCost = kmeansVals[i];\n        //         }\n    }\n\n\n    void optimizeKDTree(std::vector<CostData>& costs)\n    {\n        Logger::info(\"KD-TREE, Step 1: Exploring parameter space\\n\");\n\n        // explore kd-tree parameters space using the parameters below\n        int testTrees[] = { 1, 4, 8, 16, 32 };\n\n        // evaluate kdtree for all parameter combinations\n        for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) {\n            CostData cost;\n            cost.params[\"algorithm\"] = FLANN_INDEX_KDTREE;\n            cost.params[\"trees\"] = testTrees[i];\n\n            evaluate_kdtree(cost);\n            costs.push_back(cost);\n        }\n\n        //         Logger::info(\"KD-TREE, Step 2: simplex-downhill optimization\\n\");\n        //\n        //         const int n = 1;\n        //         // choose initial simplex points as the best parameters so far\n        //   
      int kdtreeNMPoints[n*(n+1)];\n        //         float kdtreeVals[n+1];\n        //         for (int i=0;i<n+1;++i) {\n        //             kdtreeNMPoints[i] = (int)kdtreeCosts[i].params[\"trees\"];\n        //             kdtreeVals[i] = kdtreeCosts[i].timeCost;\n        //         }\n        //         KDTreeSimpleDownhillFunctor kdtree_cost_func(*this);\n        //         // run optimization\n        //         optimizeSimplexDownhill(kdtreeNMPoints,n,kdtree_cost_func,kdtreeVals);\n        //         // store results\n        //         for (int i=0;i<n+1;++i) {\n        //             kdtreeCosts[i].params[\"trees\"] = kdtreeNMPoints[i];\n        //             kdtreeCosts[i].timeCost = kdtreeVals[i];\n        //         }\n    }\n\n    /**\n     *  Chooses the best nearest-neighbor algorithm and estimates the optimal\n     *  parameters to use when building the index (for a given precision).\n     *  Returns a dictionary with the optimal parameters.\n     */\n    IndexParams estimateBuildParams()\n    {\n        std::vector<CostData> costs;\n\n        int sampleSize = int(sample_fraction_ * dataset_.rows);\n        int testSampleSize = std::min(sampleSize / 10, 1000);\n\n        Logger::info(\"Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d, target precision: %g\\n\", dataset_.rows, sampleSize, testSampleSize, target_precision_);\n\n        // For a very small dataset, it makes no sense to build any fancy index, just\n        // use linear search\n        if (testSampleSize < 10) {\n            Logger::info(\"Choosing linear, dataset too small\\n\");\n            return LinearIndexParams();\n        }\n\n        // We use a fraction of the original dataset to speedup the autotune algorithm\n        sampledDataset_ = random_sample(dataset_, sampleSize);\n        // We use a cross-validation approach, first we sample a testset from the dataset\n        testDataset_ = random_sample(sampledDataset_, testSampleSize, true);\n\n        // We compute the ground truth using linear search\n        Logger::info(\"Computing ground truth... 
\\n\");\n        gt_matches_ = Matrix<int>(new int[testDataset_.rows], testDataset_.rows, 1);\n        StartStopTimer t;\n        t.start();\n        compute_ground_truth<Distance>(sampledDataset_, testDataset_, gt_matches_, 0, distance_);\n        t.stop();\n\n        CostData linear_cost;\n        linear_cost.searchTimeCost = (float)t.value;\n        linear_cost.buildTimeCost = 0;\n        linear_cost.memoryCost = 0;\n        linear_cost.params[\"algorithm\"] = FLANN_INDEX_LINEAR;\n\n        costs.push_back(linear_cost);\n\n        // Start parameter autotune process\n        Logger::info(\"Autotuning parameters...\\n\");\n\n        optimizeKMeans(costs);\n        optimizeKDTree(costs);\n\n        float bestTimeCost = costs[0].searchTimeCost;\n        for (size_t i = 0; i < costs.size(); ++i) {\n            float timeCost = costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost;\n            if (timeCost < bestTimeCost) {\n                bestTimeCost = timeCost;\n            }\n        }\n\n        float bestCost = costs[0].searchTimeCost / bestTimeCost;\n        IndexParams bestParams = costs[0].params;\n        if (bestTimeCost > 0) {\n            for (size_t i = 0; i < costs.size(); ++i) {\n                float crtCost = (costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost) / bestTimeCost +\n                                memory_weight_ * costs[i].memoryCost;\n                if (crtCost < bestCost) {\n                    bestCost = crtCost;\n                    bestParams = costs[i].params;\n                }\n            }\n        }\n\n        delete[] gt_matches_.data;\n        delete[] testDataset_.data;\n        delete[] sampledDataset_.data;\n\n        return bestParams;\n    }\n\n\n\n    /**\n     *  Estimates the search time parameters needed to get the desired precision.\n     *  Precondition: the index is built\n     *  Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search.\n     */\n    float estimateSearchParams(SearchParams& searchParams)\n    {\n        const int nn = 1;\n        const size_t SAMPLE_COUNT = 1000;\n\n        assert(bestIndex_ != NULL); // must have a valid index\n\n        float speedup = 0;\n\n        int samples = (int)std::min(dataset_.rows / 10, SAMPLE_COUNT);\n        if (samples > 0) {\n            Matrix<ElementType> testDataset = random_sample(dataset_, samples);\n\n            Logger::info(\"Computing ground truth\\n\");\n\n            // we need to compute the ground truth first\n            Matrix<int> gt_matches(new int[testDataset.rows], testDataset.rows, 1);\n            StartStopTimer t;\n            t.start();\n            compute_ground_truth<Distance>(dataset_, testDataset, gt_matches, 1, distance_);\n            t.stop();\n            float linear = (float)t.value;\n\n            int checks;\n            Logger::info(\"Estimating number of checks\\n\");\n\n            float searchTime;\n            float cb_index;\n            if (bestIndex_->getType() == FLANN_INDEX_KMEANS) {\n                Logger::info(\"KMeans algorithm, estimating cluster border factor\\n\");\n                KMeansIndex<Distance>* kmeans = (KMeansIndex<Distance>*)bestIndex_;\n                float bestSearchTime = -1;\n                float best_cb_index = -1;\n                int best_checks = -1;\n                for (cb_index = 0; cb_index < 1.1f; cb_index += 0.2f) {\n                    kmeans->set_cb_index(cb_index);\n                    searchTime = 
test_index_precision(*kmeans, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1);\n                    if ((searchTime < bestSearchTime) || (bestSearchTime == -1)) {\n                        bestSearchTime = searchTime;\n                        best_cb_index = cb_index;\n                        best_checks = checks;\n                    }\n                }\n                searchTime = bestSearchTime;\n                cb_index = best_cb_index;\n                checks = best_checks;\n\n                kmeans->set_cb_index(best_cb_index);\n                Logger::info(\"Optimum cb_index: %g\\n\", cb_index);\n                bestParams_[\"cb_index\"] = cb_index;\n            }\n            else {\n                searchTime = test_index_precision(*bestIndex_, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1);\n            }\n\n            Logger::info(\"Required number of checks: %d \\n\", checks);\n            searchParams[\"checks\"] = checks;\n\n            speedup = linear / searchTime;\n\n            delete[] gt_matches.data;\n            delete[] testDataset.data;\n        }\n\n        return speedup;\n    }\n\nprivate:\n    NNIndex<Distance>* bestIndex_;\n\n    IndexParams bestParams_;\n    SearchParams bestSearchParams_;\n\n    Matrix<ElementType> sampledDataset_;\n    Matrix<ElementType> testDataset_;\n    Matrix<int> gt_matches_;\n\n    float speedup_;\n\n    /**\n     * The dataset used by this index\n     */\n    const Matrix<ElementType> dataset_;\n\n    /**\n     * Index parameters\n     */\n    float target_precision_;\n    float build_weight_;\n    float memory_weight_;\n    float sample_fraction_;\n\n    Distance distance_;\n\n\n};\n}\n\n#endif /* OPENCV_FLANN_AUTOTUNED_INDEX_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/composite_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_COMPOSITE_INDEX_H_\n#define OPENCV_FLANN_COMPOSITE_INDEX_H_\n\n#include \"general.h\"\n#include \"nn_index.h\"\n#include \"kdtree_index.h\"\n#include \"kmeans_index.h\"\n\nnamespace cvflann\n{\n\n/**\n * Index parameters for the CompositeIndex.\n */\nstruct CompositeIndexParams : public IndexParams\n{\n    CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11,\n                         flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 )\n    {\n        (*this)[\"algorithm\"] = FLANN_INDEX_KMEANS;\n        // number of randomized trees to use (for kdtree)\n        (*this)[\"trees\"] = trees;\n        // branching factor\n        (*this)[\"branching\"] = branching;\n        // max iterations to perform in one kmeans clustering (kmeans tree)\n        (*this)[\"iterations\"] = iterations;\n        // algorithm used for picking the initial cluster centers for kmeans tree\n        (*this)[\"centers_init\"] = centers_init;\n        // cluster boundary index. Used when searching the kmeans tree\n        (*this)[\"cb_index\"] = cb_index;\n    }\n};\n\n\n/**\n * This index builds a kd-tree index and a k-means index and performs nearest\n * neighbour search both indexes. 
This gives a slight boost in search performance\n * as some of the neighbours that are missed by one index are found by the other.\n */\ntemplate <typename Distance>\nclass CompositeIndex : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n    /**\n     * Index constructor\n     * @param inputData dataset containing the points to index\n     * @param params Index parameters\n     * @param d Distance functor\n     * @return\n     */\n    CompositeIndex(const Matrix<ElementType>& inputData, const IndexParams& params = CompositeIndexParams(),\n                   Distance d = Distance()) : index_params_(params)\n    {\n        kdtree_index_ = new KDTreeIndex<Distance>(inputData, params, d);\n        kmeans_index_ = new KMeansIndex<Distance>(inputData, params, d);\n\n    }\n\n    CompositeIndex(const CompositeIndex&);\n    CompositeIndex& operator=(const CompositeIndex&);\n\n    virtual ~CompositeIndex()\n    {\n        delete kdtree_index_;\n        delete kmeans_index_;\n    }\n\n    /**\n     * @return The index type\n     */\n    flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_COMPOSITE;\n    }\n\n    /**\n     * @return Size of the index\n     */\n    size_t size() const\n    {\n        return kdtree_index_->size();\n    }\n\n    /**\n     * \\returns The dimensionality of the features in this index.\n     */\n    size_t veclen() const\n    {\n        return kdtree_index_->veclen();\n    }\n\n    /**\n     * \\returns The amount of memory (in bytes) used by the index.\n     */\n    int usedMemory() const\n    {\n        return kmeans_index_->usedMemory() + kdtree_index_->usedMemory();\n    }\n\n    /**\n     * \\brief Builds the index\n     */\n    void buildIndex()\n    {\n        Logger::info(\"Building kmeans tree...\\n\");\n        kmeans_index_->buildIndex();\n        Logger::info(\"Building kdtree tree...\\n\");\n        kdtree_index_->buildIndex();\n    }\n\n    /**\n     * \\brief Saves the index to a stream\n     * \\param stream The stream to save the index to\n     */\n    void saveIndex(FILE* stream)\n    {\n        kmeans_index_->saveIndex(stream);\n        kdtree_index_->saveIndex(stream);\n    }\n\n    /**\n     * \\brief Loads the index from a stream\n     * \\param stream The stream from which the index is loaded\n     */\n    void loadIndex(FILE* stream)\n    {\n        kmeans_index_->loadIndex(stream);\n        kdtree_index_->loadIndex(stream);\n    }\n\n    /**\n     * \\returns The index parameters\n     */\n    IndexParams getParameters() const\n    {\n        return index_params_;\n    }\n\n    /**\n     * \\brief Method that searches for nearest-neighbours\n     */\n    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)\n    {\n        kmeans_index_->findNeighbors(result, vec, searchParams);\n        kdtree_index_->findNeighbors(result, vec, searchParams);\n    }\n\nprivate:\n    /** The k-means index */\n    KMeansIndex<Distance>* kmeans_index_;\n\n    /** The kd-tree index */\n    KDTreeIndex<Distance>* kdtree_index_;\n\n    /** The index parameters */\n    const IndexParams index_params_;\n};\n\n}\n\n#endif //OPENCV_FLANN_COMPOSITE_INDEX_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/config.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n\n#ifndef OPENCV_FLANN_CONFIG_H_\n#define OPENCV_FLANN_CONFIG_H_\n\n#ifdef FLANN_VERSION_\n#undef FLANN_VERSION_\n#endif\n#define FLANN_VERSION_ \"1.6.10\"\n\n#endif /* OPENCV_FLANN_CONFIG_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/defines.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n\n#ifndef OPENCV_FLANN_DEFINES_H_\n#define OPENCV_FLANN_DEFINES_H_\n\n#include \"config.h\"\n\n#ifdef FLANN_EXPORT\n#undef FLANN_EXPORT\n#endif\n#ifdef WIN32\n/* win32 dll export/import directives */\n #ifdef FLANN_EXPORTS\n  #define FLANN_EXPORT __declspec(dllexport)\n #elif defined(FLANN_STATIC)\n  #define FLANN_EXPORT\n #else\n  #define FLANN_EXPORT __declspec(dllimport)\n #endif\n#else\n/* unix needs nothing */\n #define FLANN_EXPORT\n#endif\n\n\n#ifdef FLANN_DEPRECATED\n#undef FLANN_DEPRECATED\n#endif\n#ifdef __GNUC__\n#define FLANN_DEPRECATED __attribute__ ((deprecated))\n#elif defined(_MSC_VER)\n#define FLANN_DEPRECATED __declspec(deprecated)\n#else\n#pragma message(\"WARNING: You need to implement FLANN_DEPRECATED for this compiler\")\n#define FLANN_DEPRECATED\n#endif\n\n\n#undef FLANN_PLATFORM_32_BIT\n#undef FLANN_PLATFORM_64_BIT\n#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64\n#define FLANN_PLATFORM_64_BIT\n#else\n#define FLANN_PLATFORM_32_BIT\n#endif\n\n\n#undef FLANN_ARRAY_LEN\n#define FLANN_ARRAY_LEN(a) (sizeof(a)/sizeof(a[0]))\n\nnamespace cvflann {\n\n/* Nearest neighbour index algorithms */\nenum flann_algorithm_t\n{\n    FLANN_INDEX_LINEAR = 0,\n    FLANN_INDEX_KDTREE = 1,\n    FLANN_INDEX_KMEANS = 2,\n    FLANN_INDEX_COMPOSITE = 3,\n    FLANN_INDEX_KDTREE_SINGLE = 4,\n    FLANN_INDEX_HIERARCHICAL = 5,\n    FLANN_INDEX_LSH = 6,\n    FLANN_INDEX_SAVED = 254,\n    FLANN_INDEX_AUTOTUNED = 255,\n\n    // deprecated constants, should use the FLANN_INDEX_* ones instead\n    LINEAR = 0,\n    KDTREE = 1,\n    KMEANS = 2,\n    COMPOSITE = 3,\n    KDTREE_SINGLE = 4,\n    SAVED = 254,\n    AUTOTUNED = 255\n};\n\n\n\nenum flann_centers_init_t\n{\n    FLANN_CENTERS_RANDOM = 0,\n    FLANN_CENTERS_GONZALES = 1,\n    FLANN_CENTERS_KMEANSPP = 2,\n    FLANN_CENTERS_GROUPWISE = 3,\n\n    // deprecated constants, should use the FLANN_CENTERS_* 
ones instead\n    CENTERS_RANDOM = 0,\n    CENTERS_GONZALES = 1,\n    CENTERS_KMEANSPP = 2\n};\n\nenum flann_log_level_t\n{\n    FLANN_LOG_NONE = 0,\n    FLANN_LOG_FATAL = 1,\n    FLANN_LOG_ERROR = 2,\n    FLANN_LOG_WARN = 3,\n    FLANN_LOG_INFO = 4\n};\n\nenum flann_distance_t\n{\n    FLANN_DIST_EUCLIDEAN = 1,\n    FLANN_DIST_L2 = 1,\n    FLANN_DIST_MANHATTAN = 2,\n    FLANN_DIST_L1 = 2,\n    FLANN_DIST_MINKOWSKI = 3,\n    FLANN_DIST_MAX   = 4,\n    FLANN_DIST_HIST_INTERSECT   = 5,\n    FLANN_DIST_HELLINGER = 6,\n    FLANN_DIST_CHI_SQUARE = 7,\n    FLANN_DIST_CS         = 7,\n    FLANN_DIST_KULLBACK_LEIBLER  = 8,\n    FLANN_DIST_KL                = 8,\n    FLANN_DIST_HAMMING          = 9,\n\n    // deprecated constants, should use the FLANN_DIST_* ones instead\n    EUCLIDEAN = 1,\n    MANHATTAN = 2,\n    MINKOWSKI = 3,\n    MAX_DIST   = 4,\n    HIST_INTERSECT   = 5,\n    HELLINGER = 6,\n    CS         = 7,\n    KL         = 8,\n    KULLBACK_LEIBLER  = 8\n};\n\nenum flann_datatype_t\n{\n    FLANN_INT8 = 0,\n    FLANN_INT16 = 1,\n    FLANN_INT32 = 2,\n    FLANN_INT64 = 3,\n    FLANN_UINT8 = 4,\n    FLANN_UINT16 = 5,\n    FLANN_UINT32 = 6,\n    FLANN_UINT64 = 7,\n    FLANN_FLOAT32 = 8,\n    FLANN_FLOAT64 = 9\n};\n\nenum\n{\n    FLANN_CHECKS_UNLIMITED = -1,\n    FLANN_CHECKS_AUTOTUNED = -2\n};\n\n}\n\n#endif /* OPENCV_FLANN_DEFINES_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/dist.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_DIST_H_\n#define OPENCV_FLANN_DIST_H_\n\n#include <cmath>\n#include <cstdlib>\n#include <string.h>\n#ifdef _MSC_VER\ntypedef unsigned __int32 uint32_t;\ntypedef unsigned __int64 uint64_t;\n#else\n#include <stdint.h>\n#endif\n\n#include \"defines.h\"\n\n#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)\n# include <Intrin.h>\n#endif\n\n#ifdef __ARM_NEON__\n# include \"arm_neon.h\"\n#endif\n\nnamespace cvflann\n{\n\ntemplate<typename T>\ninline T abs(T x) { return (x<0) ? -x : x; }\n\ntemplate<>\ninline int abs<int>(int x) { return ::abs(x); }\n\ntemplate<>\ninline float abs<float>(float x) { return fabsf(x); }\n\ntemplate<>\ninline double abs<double>(double x) { return fabs(x); }\n\ntemplate<typename T>\nstruct Accumulator { typedef T Type; };\ntemplate<>\nstruct Accumulator<unsigned char>  { typedef float Type; };\ntemplate<>\nstruct Accumulator<unsigned short> { typedef float Type; };\ntemplate<>\nstruct Accumulator<unsigned int> { typedef float Type; };\ntemplate<>\nstruct Accumulator<char>   { typedef float Type; };\ntemplate<>\nstruct Accumulator<short>  { typedef float Type; };\ntemplate<>\nstruct Accumulator<int> { typedef float Type; };\n\n#undef True\n#undef False\n\nclass True\n{\n};\n\nclass False\n{\n};\n\n\n/**\n * Squared Euclidean distance functor.\n *\n * This is the simpler, unrolled version. 
This is preferable for\n * very low dimensionality data (e.g. 3D points)\n */\ntemplate<class T>\nstruct L2_Simple\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType diff;\n        for(size_t i = 0; i < size; ++i ) {\n            diff = *a++ - *b++;\n            result += diff*diff;\n        }\n        return result;\n    }\n\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& b, int) const\n    {\n        return (a-b)*(a-b);\n    }\n};\n\n\n\n/**\n * Squared Euclidean distance functor, optimized version\n */\ntemplate<class T>\nstruct L2\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    /**\n     *  Compute the squared Euclidean distance between two vectors.\n     *\n     *\tThis is highly optimised, with loop unrolling, as it is one\n     *\tof the most expensive inner loops.\n     *\n     *\tThe computation of the square root at the end is omitted for\n     *\tefficiency.\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType diff0, diff1, diff2, diff3;\n        Iterator1 last = a + size;\n        Iterator1 lastgroup = last - 3;\n\n        /* Process 4 items with each loop for efficiency. */\n        while (a < lastgroup) {\n            diff0 = (ResultType)(a[0] - b[0]);\n            diff1 = (ResultType)(a[1] - b[1]);\n            diff2 = (ResultType)(a[2] - b[2]);\n            diff3 = (ResultType)(a[3] - b[3]);\n            result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;\n            a += 4;\n            b += 4;\n\n            if ((worst_dist>0)&&(result>worst_dist)) {\n                return result;\n            }\n        }\n        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */\n        while (a < last) {\n            diff0 = (ResultType)(*a++ - *b++);\n            result += diff0 * diff0;\n        }\n        return result;\n    }\n\n    /**\n     *\tPartial Euclidean distance, using just one dimension. 
This is used by the\n     *\tkd-tree when computing partial distances while traversing the tree.\n     *\n     *\tThe square root is omitted for efficiency.\n     */\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& b, int) const\n    {\n        return (a-b)*(a-b);\n    }\n};\n\n\n/*\n * Manhattan distance functor, optimized version\n */\ntemplate<class T>\nstruct L1\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    /**\n     *  Compute the Manhattan (L_1) distance between two vectors.\n     *\n     *\tThis is highly optimised, with loop unrolling, as it is one\n     *\tof the most expensive inner loops.\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType diff0, diff1, diff2, diff3;\n        Iterator1 last = a + size;\n        Iterator1 lastgroup = last - 3;\n\n        /* Process 4 items with each loop for efficiency. */\n        while (a < lastgroup) {\n            diff0 = (ResultType)abs(a[0] - b[0]);\n            diff1 = (ResultType)abs(a[1] - b[1]);\n            diff2 = (ResultType)abs(a[2] - b[2]);\n            diff3 = (ResultType)abs(a[3] - b[3]);\n            result += diff0 + diff1 + diff2 + diff3;\n            a += 4;\n            b += 4;\n\n            if ((worst_dist>0)&&(result>worst_dist)) {\n                return result;\n            }\n        }\n        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */\n        while (a < last) {\n            diff0 = (ResultType)abs(*a++ - *b++);\n            result += diff0;\n        }\n        return result;\n    }\n\n    /**\n     * Partial distance, used by the kd-tree.\n     */\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& b, int) const\n    {\n        return abs(a-b);\n    }\n};\n\n\n\ntemplate<class T>\nstruct MinkowskiDistance\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    int order;\n\n    MinkowskiDistance(int order_) : order(order_) {}\n\n    /**\n     *  Compute the Minkowski (L_p) distance between two vectors.\n     *\n     *\tThis is highly optimised, with loop unrolling, as it is one\n     *\tof the most expensive inner loops.\n     *\n     *\tThe computation of the p-th root at the end is omitted for\n     *\tefficiency.\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType diff0, diff1, diff2, diff3;\n        Iterator1 last = a + size;\n        Iterator1 lastgroup = last - 3;\n\n        /* Process 4 items with each loop for efficiency. 
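The per-element pow() calls make this functor considerably more expensive than the dedicated L1/L2 loops. 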
*/\n        while (a < lastgroup) {\n            diff0 = (ResultType)abs(a[0] - b[0]);\n            diff1 = (ResultType)abs(a[1] - b[1]);\n            diff2 = (ResultType)abs(a[2] - b[2]);\n            diff3 = (ResultType)abs(a[3] - b[3]);\n            result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order);\n            a += 4;\n            b += 4;\n\n            if ((worst_dist>0)&&(result>worst_dist)) {\n                return result;\n            }\n        }\n        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */\n        while (a < last) {\n            diff0 = (ResultType)abs(*a++ - *b++);\n            result += pow(diff0,order);\n        }\n        return result;\n    }\n\n    /**\n     * Partial distance, used by the kd-tree.\n     */\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& b, int) const\n    {\n        return pow(static_cast<ResultType>(abs(a-b)),order);\n    }\n};\n\n\n\ntemplate<class T>\nstruct MaxDistance\n{\n    typedef False is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    /**\n     *  Compute the max distance (L_infinity) between two vectors.\n     *\n     *  This distance is not a valid kdtree distance, it's not dimensionwise additive.\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType diff0, diff1, diff2, diff3;\n        Iterator1 last = a + size;\n        Iterator1 lastgroup = last - 3;\n\n        /* Process 4 items with each loop for efficiency. */\n        while (a < lastgroup) {\n            diff0 = abs(a[0] - b[0]);\n            diff1 = abs(a[1] - b[1]);\n            diff2 = abs(a[2] - b[2]);\n            diff3 = abs(a[3] - b[3]);\n            if (diff0>result) {result = diff0; }\n            if (diff1>result) {result = diff1; }\n            if (diff2>result) {result = diff2; }\n            if (diff3>result) {result = diff3; }\n            a += 4;\n            b += 4;\n\n            if ((worst_dist>0)&&(result>worst_dist)) {\n                return result;\n            }\n        }\n        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */\n        while (a < last) {\n            diff0 = abs(*a++ - *b++);\n            result = (diff0>result) ? 
diff0 : result;\n        }\n        return result;\n    }\n\n    /* This distance functor is not dimension-wise additive, which\n     * makes it an invalid kd-tree distance, not implementing the accum_dist method */\n\n};\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/**\n * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor\n * bit count of A exclusive XOR'ed with B\n */\nstruct HammingLUT\n{\n    typedef False is_kdtree_distance;\n    typedef False is_vector_space_distance;\n\n    typedef unsigned char ElementType;\n    typedef int ResultType;\n\n    /** this will count the bits in a ^ b\n     */\n    ResultType operator()(const unsigned char* a, const unsigned char* b, size_t size) const\n    {\n        static const uchar popCountTable[] =\n        {\n            0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,\n            1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,\n            1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,\n            2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,\n            1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,\n            2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,\n            2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,\n            3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8\n        };\n        ResultType result = 0;\n        for (size_t i = 0; i < size; i++) {\n            result += popCountTable[a[i] ^ b[i]];\n        }\n        return result;\n    }\n};\n\n/**\n * Hamming distance functor (pop count between two binary vectors, i.e. 
xor them and count the number of bits set)\n * That code was taken from brief.cpp in OpenCV\n */\ntemplate<class T>\nstruct Hamming\n{\n    typedef False is_kdtree_distance;\n    typedef False is_vector_space_distance;\n\n\n    typedef T ElementType;\n    typedef int ResultType;\n\n    template<typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const\n    {\n        ResultType result = 0;\n#ifdef __ARM_NEON__\n        {\n            uint32x4_t bits = vmovq_n_u32(0);\n            for (size_t i = 0; i < size; i += 16) {\n                uint8x16_t A_vec = vld1q_u8 (a + i);\n                uint8x16_t B_vec = vld1q_u8 (b + i);\n                uint8x16_t AxorB = veorq_u8 (A_vec, B_vec);\n                uint8x16_t bitsSet = vcntq_u8 (AxorB);\n                uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet);\n                uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8);\n                bits = vaddq_u32(bits, bitSet4);\n            }\n            uint64x2_t bitSet2 = vpaddlq_u32 (bits);\n            result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0);\n            result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2);\n        }\n#elif __GNUC__\n        {\n            //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll)\n            typedef unsigned long long pop_t;\n            const size_t modulo = size % sizeof(pop_t);\n            const pop_t* a2 = reinterpret_cast<const pop_t*> (a);\n            const pop_t* b2 = reinterpret_cast<const pop_t*> (b);\n            const pop_t* a2_end = a2 + (size / sizeof(pop_t));\n\n            for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2));\n\n            if (modulo) {\n                //in the case where size is not dividable by sizeof(size_t)\n                //need to mask off the bits at the end\n                pop_t a_final = 0, b_final = 0;\n                memcpy(&a_final, a2, modulo);\n                memcpy(&b_final, b2, modulo);\n                result += __builtin_popcountll(a_final ^ b_final);\n            }\n        }\n#else // NO NEON and NOT GNUC\n        typedef unsigned long long pop_t;\n        HammingLUT lut;\n        result = lut(reinterpret_cast<const unsigned char*> (a),\n                     reinterpret_cast<const unsigned char*> (b), size * sizeof(pop_t));\n#endif\n        return result;\n    }\n};\n\ntemplate<typename T>\nstruct Hamming2\n{\n    typedef False is_kdtree_distance;\n    typedef False is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef int ResultType;\n\n    /** This is popcount_3() from:\n     * http://en.wikipedia.org/wiki/Hamming_weight */\n    unsigned int popcnt32(uint32_t n) const\n    {\n        n -= ((n >> 1) & 0x55555555);\n        n = (n & 0x33333333) + ((n >> 2) & 0x33333333);\n        return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24;\n    }\n\n#ifdef FLANN_PLATFORM_64_BIT\n    unsigned int popcnt64(uint64_t n) const\n    {\n        n -= ((n >> 1) & 0x5555555555555555);\n        n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333);\n        return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56;\n    }\n#endif\n\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const\n    {\n#ifdef FLANN_PLATFORM_64_BIT\n        const uint64_t* pa = reinterpret_cast<const uint64_t*>(a);\n        
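// Walk the descriptors one 64-bit word at a time: XOR exposes the\n        // differing bits and popcnt64 counts them, eight bytes per iteration.\n        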
const uint64_t* pb = reinterpret_cast<const uint64_t*>(b);\n        ResultType result = 0;\n        size /= (sizeof(uint64_t)/sizeof(unsigned char));\n        for(size_t i = 0; i < size; ++i ) {\n            result += popcnt64(*pa ^ *pb);\n            ++pa;\n            ++pb;\n        }\n#else\n        const uint32_t* pa = reinterpret_cast<const uint32_t*>(a);\n        const uint32_t* pb = reinterpret_cast<const uint32_t*>(b);\n        ResultType result = 0;\n        size /= (sizeof(uint32_t)/sizeof(unsigned char));\n        for(size_t i = 0; i < size; ++i ) {\n            result += popcnt32(*pa ^ *pb);\n            ++pa;\n            ++pb;\n        }\n#endif\n        return result;\n    }\n};\n\n\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\ntemplate<class T>\nstruct HistIntersectionDistance\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    /**\n     *  Compute the histogram intersection distance\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType min0, min1, min2, min3;\n        Iterator1 last = a + size;\n        Iterator1 lastgroup = last - 3;\n\n        /* Process 4 items with each loop for efficiency. */\n        while (a < lastgroup) {\n            min0 = (ResultType)(a[0] < b[0] ? a[0] : b[0]);\n            min1 = (ResultType)(a[1] < b[1] ? a[1] : b[1]);\n            min2 = (ResultType)(a[2] < b[2] ? a[2] : b[2]);\n            min3 = (ResultType)(a[3] < b[3] ? a[3] : b[3]);\n            result += min0 + min1 + min2 + min3;\n            a += 4;\n            b += 4;\n            if ((worst_dist>0)&&(result>worst_dist)) {\n                return result;\n            }\n        }\n        /* Process last 0-3 pixels.  Not needed for standard vector lengths. */\n        while (a < last) {\n            min0 = (ResultType)(*a < *b ? *a : *b);\n            result += min0;\n            ++a;\n            ++b;\n        }\n        return result;\n    }\n\n    /**\n     * Partial distance, used by the kd-tree.\n     */\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& b, int) const\n    {\n        return a<b ? a : b;\n    }\n};\n\n\n\ntemplate<class T>\nstruct HellingerDistance\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    /**\n     *  Compute the Hellinger distance\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType diff0, diff1, diff2, diff3;\n        Iterator1 last = a + size;\n        Iterator1 lastgroup = last - 3;\n\n        /* Process 4 items with each loop for efficiency. 
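Each term is (sqrt(a[i]) - sqrt(b[i]))^2, so the inputs are assumed to be non-negative (e.g. histogram bins). 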
*/\n        while (a < lastgroup) {\n            diff0 = sqrt(static_cast<ResultType>(a[0])) - sqrt(static_cast<ResultType>(b[0]));\n            diff1 = sqrt(static_cast<ResultType>(a[1])) - sqrt(static_cast<ResultType>(b[1]));\n            diff2 = sqrt(static_cast<ResultType>(a[2])) - sqrt(static_cast<ResultType>(b[2]));\n            diff3 = sqrt(static_cast<ResultType>(a[3])) - sqrt(static_cast<ResultType>(b[3]));\n            result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;\n            a += 4;\n            b += 4;\n        }\n        while (a < last) {\n            diff0 = sqrt(static_cast<ResultType>(*a++)) - sqrt(static_cast<ResultType>(*b++));\n            result += diff0 * diff0;\n        }\n        return result;\n    }\n\n    /**\n     * Partial distance, used by the kd-tree.\n     */\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& b, int) const\n    {\n        ResultType diff = sqrt(static_cast<ResultType>(a)) - sqrt(static_cast<ResultType>(b));\n        return diff * diff;\n    }\n};\n\n\ntemplate<class T>\nstruct ChiSquareDistance\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    /**\n     *  Compute the chi-square distance\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const\n    {\n        ResultType result = ResultType();\n        ResultType sum, diff;\n        Iterator1 last = a + size;\n\n        while (a < last) {\n            sum = (ResultType)(*a + *b);\n            if (sum>0) {\n                diff = (ResultType)(*a - *b);\n                result += diff*diff/sum;\n            }\n            ++a;\n            ++b;\n\n            if ((worst_dist>0)&&(result>worst_dist)) {\n                return result;\n            }\n        }\n        return result;\n    }\n\n    /**\n     * Partial distance, used by the kd-tree.\n     */\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& b, int) const\n    {\n        ResultType result = ResultType();\n        ResultType sum, diff;\n\n        sum = (ResultType)(a+b);\n        if (sum>0) {\n            diff = (ResultType)(a-b);\n            result = diff*diff/sum;\n        }\n        return result;\n    }\n};\n\n\ntemplate<class T>\nstruct KL_Divergence\n{\n    typedef True is_kdtree_distance;\n    typedef True is_vector_space_distance;\n\n    typedef T ElementType;\n    typedef typename Accumulator<T>::Type ResultType;\n\n    /**\n     *  Compute the Kullback–Leibler divergence\n     */\n    template <typename Iterator1, typename Iterator2>\n    ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const\n    {\n        ResultType result = ResultType();\n        Iterator1 last = a + size;\n\n        while (a < last) {\n            if (* b != 0) {\n                ResultType ratio = (ResultType)(*a / *b);\n                if (ratio>0) {\n                    result += *a * log(ratio);\n                }\n            }\n            ++a;\n            ++b;\n\n            if ((worst_dist>0)&&(result>worst_dist)) {\n                return result;\n            }\n        }\n        return result;\n    }\n\n    /**\n     * Partial distance, used by the kd-tree.\n     */\n    template <typename U, typename V>\n    inline ResultType accum_dist(const U& a, const V& 
b, int) const\n    {\n        ResultType result = ResultType();\n        if( b != 0 ) {\n            ResultType ratio = (ResultType)(a / b);\n            if (ratio>0) {\n                result = a * log(ratio);\n            }\n        }\n        return result;\n    }\n};\n\n\n\n/*\n * This is a \"zero iterator\". It basically behaves like a zero filled\n * array to all algorithms that use arrays as iterators (STL style).\n * It's useful when there's a need to compute the distance between a feature\n * and the origin, and it allows for better compiler optimisation than using a\n * zero-filled array.\n */\ntemplate <typename T>\nstruct ZeroIterator\n{\n\n    T operator*()\n    {\n        return 0;\n    }\n\n    T operator[](int)\n    {\n        return 0;\n    }\n\n    const ZeroIterator<T>& operator ++()\n    {\n        return *this;\n    }\n\n    ZeroIterator<T> operator ++(int)\n    {\n        return *this;\n    }\n\n    ZeroIterator<T>& operator+=(int)\n    {\n        return *this;\n    }\n\n};\n\n\n/*\n * Depending on the distance used, some results are already squared (e.g. L2)\n * and some are not (e.g. Hamming). In KMeans++ for instance we want to be sure\n * we are working on squared distances, hence the following templates ensure that.\n */\ntemplate <typename Distance, typename ElementType>\nstruct squareDistance\n{\n    typedef typename Distance::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return dist*dist; }\n};\n\n\ntemplate <typename ElementType>\nstruct squareDistance<L2_Simple<ElementType>, ElementType>\n{\n    typedef typename L2_Simple<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return dist; }\n};\n\ntemplate <typename ElementType>\nstruct squareDistance<L2<ElementType>, ElementType>\n{\n    typedef typename L2<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return dist; }\n};\n\n\ntemplate <typename ElementType>\nstruct squareDistance<MinkowskiDistance<ElementType>, ElementType>\n{\n    typedef typename MinkowskiDistance<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return dist; }\n};\n\ntemplate <typename ElementType>\nstruct squareDistance<HellingerDistance<ElementType>, ElementType>\n{\n    typedef typename HellingerDistance<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return dist; }\n};\n\ntemplate <typename ElementType>\nstruct squareDistance<ChiSquareDistance<ElementType>, ElementType>\n{\n    typedef typename ChiSquareDistance<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return dist; }\n};\n\n\ntemplate <typename Distance>\ntypename Distance::ResultType ensureSquareDistance( typename Distance::ResultType dist )\n{\n    typedef typename Distance::ElementType ElementType;\n\n    squareDistance<Distance, ElementType> dummy;\n    return dummy( dist );\n}\n\n\n/*\n * ...and a template to ensure that the user gets the plain (non-squared) distance,\n * without losing processing time by calling sqrt(ensureSquareDistance), which would\n * actually compute sqrt(dist*dist) for the L1 distance, for instance.\n */\ntemplate <typename Distance, typename ElementType>\nstruct simpleDistance\n{\n    typedef typename Distance::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return dist; }\n};\n\n\ntemplate <typename ElementType>\nstruct simpleDistance<L2_Simple<ElementType>, ElementType>\n{\n    typedef typename L2_Simple<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return sqrt(dist); }\n};\n\ntemplate <typename ElementType>\nstruct simpleDistance<L2<ElementType>, ElementType>\n{\n    typedef typename L2<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return sqrt(dist); }\n};\n\n\ntemplate <typename ElementType>\nstruct simpleDistance<MinkowskiDistance<ElementType>, ElementType>\n{\n    typedef typename MinkowskiDistance<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return sqrt(dist); }\n};\n\ntemplate <typename ElementType>\nstruct simpleDistance<HellingerDistance<ElementType>, ElementType>\n{\n    typedef typename HellingerDistance<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return sqrt(dist); }\n};\n\ntemplate <typename ElementType>\nstruct simpleDistance<ChiSquareDistance<ElementType>, ElementType>\n{\n    typedef typename ChiSquareDistance<ElementType>::ResultType ResultType;\n    ResultType operator()( ResultType dist ) { return sqrt(dist); }\n};\n\n\ntemplate <typename Distance>\ntypename Distance::ResultType ensureSimpleDistance( typename Distance::ResultType dist )\n{\n    typedef typename Distance::ElementType ElementType;\n\n    simpleDistance<Distance, ElementType> dummy;\n    return dummy( dist );\n}\n\n}\n\n#endif //OPENCV_FLANN_DIST_H_\n"
  },
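  {
    "path": "docs/examples/flann_dist_example.cpp",
    "content": "// Illustrative usage sketch for the cvflann distance functors defined in\n// opencv2/flann/dist.h. This file, its name and its values are hypothetical\n// additions for documentation purposes, not part of the original sources.\n// It shows how a distance functor is applied to two feature vectors and how\n// ensureSimpleDistance converts between squared and plain distances.\n#include <cstdio>\n#include \"opencv2/flann/dist.h\"\n\nint main()\n{\n    float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};\n    float b[4] = {2.0f, 2.0f, 2.0f, 2.0f};\n\n    // L2 returns the *squared* Euclidean distance.\n    cvflann::L2<float> l2;\n    float d2 = l2(a, b, 4);\n\n    // ensureSimpleDistance takes the square root for L2-like distances...\n    float d = cvflann::ensureSimpleDistance< cvflann::L2<float> >(d2);\n\n    // ...while for L1 it is the identity, since L1 is not a squared distance.\n    cvflann::L1<float> l1;\n    float m = cvflann::ensureSimpleDistance< cvflann::L1<float> >(l1(a, b, 4));\n\n    std::printf(\"L2 squared: %f, L2: %f, L1: %f\\n\", d2, d, m);\n    return 0;\n}\n"
  },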
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/dummy.h",
    "content": "\n#ifndef OPENCV_FLANN_DUMMY_H_\n#define OPENCV_FLANN_DUMMY_H_\n\nnamespace cvflann\n{\n\n#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS\n__declspec(dllexport)\n#endif\nvoid dummyfunc();\n\n}\n\n\n#endif  /* OPENCV_FLANN_DUMMY_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/dynamic_bitset.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n/***********************************************************************\n * Author: Vincent Rabaud\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_\n#define OPENCV_FLANN_DYNAMIC_BITSET_H_\n\n#ifndef FLANN_USE_BOOST\n#  define FLANN_USE_BOOST 0\n#endif\n//#define FLANN_USE_BOOST 1\n#if FLANN_USE_BOOST\n#include <boost/dynamic_bitset.hpp>\ntypedef boost::dynamic_bitset<> DynamicBitset;\n#else\n\n#include <limits.h>\n\n#include \"dist.h\"\n\nnamespace cvflann {\n\n/** Class re-implementing the boost version of it\n * This helps not depending on boost, it also does not do the bound checks\n * and has a way to reset a block for speed\n */\nclass DynamicBitset\n{\npublic:\n    /** default constructor\n     */\n    DynamicBitset()\n    {\n    }\n\n    /** only constructor we use in our code\n     * @param sz the size of the bitset (in bits)\n     */\n    DynamicBitset(size_t sz)\n    {\n        resize(sz);\n        reset();\n    }\n\n    /** Sets all the bits to 0\n     */\n    void clear()\n    {\n        std::fill(bitset_.begin(), bitset_.end(), 0);\n    }\n\n    /** @brief checks if the bitset is empty\n     * @return true if the bitset is empty\n     */\n    bool empty() const\n    {\n        return bitset_.empty();\n    }\n\n    /** set all the bits to 0\n     */\n    void reset()\n    {\n        std::fill(bitset_.begin(), bitset_.end(), 0);\n    }\n\n    /** @brief set one bit to 0\n     * @param index\n     */\n    void reset(size_t index)\n    {\n        bitset_[index / cell_bit_size_] &= ~(size_t(1) << (index % cell_bit_size_));\n    }\n\n    /** @brief sets a specific bit to 0, and more bits too\n     * This function is useful when resetting a given set of bits so that the\n     * whole bitset ends up being 0: if that's the case, we don't care about setting\n     * other bits 
to 0\n     * @param index\n     */\n    void reset_block(size_t index)\n    {\n        bitset_[index / cell_bit_size_] = 0;\n    }\n\n    /** resize the bitset so that it contains at least sz bits\n     * @param sz\n     */\n    void resize(size_t sz)\n    {\n        size_ = sz;\n        bitset_.resize(sz / cell_bit_size_ + 1);\n    }\n\n    /** set a bit to true\n     * @param index the index of the bit to set to 1\n     */\n    void set(size_t index)\n    {\n        bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_);\n    }\n\n    /** gives the number of contained bits\n     */\n    size_t size() const\n    {\n        return size_;\n    }\n\n    /** check if a bit is set\n     * @param index the index of the bit to check\n     * @return true if the bit is set\n     */\n    bool test(size_t index) const\n    {\n        return (bitset_[index / cell_bit_size_] & (size_t(1) << (index % cell_bit_size_))) != 0;\n    }\n\nprivate:\n    std::vector<size_t> bitset_;\n    size_t size_;\n    static const unsigned int cell_bit_size_ = CHAR_BIT * sizeof(size_t);\n};\n\n} // namespace cvflann\n\n#endif\n\n#endif // OPENCV_FLANN_DYNAMIC_BITSET_H_\n"
  },
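  {
    "path": "docs/examples/flann_dynamic_bitset_example.cpp",
    "content": "// Illustrative usage sketch for cvflann::DynamicBitset from\n// opencv2/flann/dynamic_bitset.h. This file and its values are hypothetical\n// additions for documentation purposes, not part of the original sources.\n// The bitset performs no bound checks, so every index must stay below the\n// size passed to the constructor.\n#include <cstdio>\n#include \"opencv2/flann/dynamic_bitset.h\"\n\nint main()\n{\n    cvflann::DynamicBitset visited(100);  // 100 bits, all initially 0\n\n    visited.set(42);\n    std::printf(\"bit 42 after set: %d\\n\", (int)visited.test(42));    // 1\n\n    visited.reset(42);                    // clear a single bit\n    std::printf(\"bit 42 after reset: %d\\n\", (int)visited.test(42));  // 0\n\n    visited.set(7);\n    visited.reset_block(7);               // clears the whole word containing bit 7\n    std::printf(\"bit 7: %d, size: %d\\n\", (int)visited.test(7), (int)visited.size());\n    return 0;\n}\n"
  },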
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/flann.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/flann.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/flann_base.hpp",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_BASE_HPP_\n#define OPENCV_FLANN_BASE_HPP_\n\n#include <vector>\n#include <cassert>\n#include <cstdio>\n\n#include \"general.h\"\n#include \"matrix.h\"\n#include \"params.h\"\n#include \"saving.h\"\n\n#include \"all_indices.h\"\n\nnamespace cvflann\n{\n\n/**\n * Sets the log level used for all flann functions\n * @param level Verbosity level\n */\ninline void log_verbosity(int level)\n{\n    if (level >= 0) {\n        Logger::setLevel(level);\n    }\n}\n\n/**\n * (Deprecated) Index parameters for creating a saved index.\n */\nstruct SavedIndexParams : public IndexParams\n{\n    SavedIndexParams(cv::String filename)\n    {\n        (* this)[\"algorithm\"] = FLANN_INDEX_SAVED;\n        (*this)[\"filename\"] = filename;\n    }\n};\n\n\ntemplate<typename Distance>\nNNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>& dataset, const cv::String& filename, Distance distance)\n{\n    typedef typename Distance::ElementType ElementType;\n\n    FILE* fin = fopen(filename.c_str(), \"rb\");\n    if (fin == NULL) {\n        return NULL;\n    }\n    IndexHeader header = load_header(fin);\n    if (header.data_type != Datatype<ElementType>::type()) {\n        throw FLANNException(\"Datatype of saved index is different than of the one to be created.\");\n    }\n    if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) {\n        throw FLANNException(\"The index saved belongs to a different dataset\");\n    }\n\n    IndexParams params;\n    params[\"algorithm\"] = header.index_type;\n    NNIndex<Distance>* nnIndex = create_index_by_type<Distance>(dataset, params, distance);\n    nnIndex->loadIndex(fin);\n    fclose(fin);\n\n    return nnIndex;\n}\n\n\ntemplate<typename Distance>\nclass Index : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    
typedef typename Distance::ResultType DistanceType;\n\n    Index(const Matrix<ElementType>& features, const IndexParams& params, Distance distance = Distance() )\n        : index_params_(params)\n    {\n        flann_algorithm_t index_type = get_param<flann_algorithm_t>(params,\"algorithm\");\n        loaded_ = false;\n\n        if (index_type == FLANN_INDEX_SAVED) {\n            nnIndex_ = load_saved_index<Distance>(features, get_param<cv::String>(params,\"filename\"), distance);\n            loaded_ = true;\n        }\n        else {\n            nnIndex_ = create_index_by_type<Distance>(features, params, distance);\n        }\n    }\n\n    ~Index()\n    {\n        delete nnIndex_;\n    }\n\n    /**\n     * Builds the index.\n     */\n    void buildIndex()\n    {\n        if (!loaded_) {\n            nnIndex_->buildIndex();\n        }\n    }\n\n    void save(cv::String filename)\n    {\n        FILE* fout = fopen(filename.c_str(), \"wb\");\n        if (fout == NULL) {\n            throw FLANNException(\"Cannot open file\");\n        }\n        save_header(fout, *nnIndex_);\n        saveIndex(fout);\n        fclose(fout);\n    }\n\n    /**\n     * \\brief Saves the index to a stream\n     * \\param stream The stream to save the index to\n     */\n    virtual void saveIndex(FILE* stream)\n    {\n        nnIndex_->saveIndex(stream);\n    }\n\n    /**\n     * \\brief Loads the index from a stream\n     * \\param stream The stream from which the index is loaded\n     */\n    virtual void loadIndex(FILE* stream)\n    {\n        nnIndex_->loadIndex(stream);\n    }\n\n    /**\n     * \\returns number of features in this index.\n     */\n    size_t veclen() const\n    {\n        return nnIndex_->veclen();\n    }\n\n    /**\n     * \\returns The dimensionality of the features in this index.\n     */\n    size_t size() const\n    {\n        return nnIndex_->size();\n    }\n\n    /**\n     * \\returns The index type (kdtree, kmeans,...)\n     */\n    flann_algorithm_t getType() const\n    {\n        return nnIndex_->getType();\n    }\n\n    /**\n     * \\returns The amount of memory (in bytes) used by the index.\n     */\n    virtual int usedMemory() const\n    {\n        return nnIndex_->usedMemory();\n    }\n\n\n    /**\n     * \\returns The index parameters\n     */\n    IndexParams getParameters() const\n    {\n        return nnIndex_->getParameters();\n    }\n\n    /**\n     * \\brief Perform k-nearest neighbor search\n     * \\param[in] queries The query points for which to find the nearest neighbors\n     * \\param[out] indices The indices of the nearest neighbors found\n     * \\param[out] dists Distances to the nearest neighbors found\n     * \\param[in] knn Number of nearest neighbors to return\n     * \\param[in] params Search parameters\n     */\n    void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)\n    {\n        nnIndex_->knnSearch(queries, indices, dists, knn, params);\n    }\n\n    /**\n     * \\brief Perform radius search\n     * \\param[in] query The query point\n     * \\param[out] indices The indinces of the neighbors found within the given radius\n     * \\param[out] dists The distances to the nearest neighbors found\n     * \\param[in] radius The radius used for search\n     * \\param[in] params Search parameters\n     * \\returns Number of neighbors found\n     */\n    int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const 
SearchParams& params)\n    {\n        return nnIndex_->radiusSearch(query, indices, dists, radius, params);\n    }\n\n    /**\n     * \\brief Method that searches for nearest-neighbours\n     */\n    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)\n    {\n        nnIndex_->findNeighbors(result, vec, searchParams);\n    }\n\n    /**\n     * \\brief Returns actual index\n     */\n    FLANN_DEPRECATED NNIndex<Distance>* getIndex()\n    {\n        return nnIndex_;\n    }\n\n    /**\n     * \\brief Returns index parameters.\n     * \\deprecated use getParameters() instead.\n     */\n    FLANN_DEPRECATED  const IndexParams* getIndexParameters()\n    {\n        return &index_params_;\n    }\n\nprivate:\n    /** Pointer to actual index class */\n    NNIndex<Distance>* nnIndex_;\n    /** Indices if the index was loaded from a file */\n    bool loaded_;\n    /** Parameters passed to the index */\n    IndexParams index_params_;\n};\n\n/**\n * Performs a hierarchical clustering of the points passed as argument and then takes a cut in the\n * the clustering tree to return a flat clustering.\n * @param[in] points Points to be clustered\n * @param centers The computed cluster centres. Matrix should be preallocated and centers.rows is the\n *  number of clusters requested.\n * @param params Clustering parameters (The same as for cvflann::KMeansIndex)\n * @param d Distance to be used for clustering (eg: cvflann::L2)\n * @return number of clusters computed (can be different than clusters.rows and is the highest number\n * of the form (branching-1)*K+1 smaller than clusters.rows).\n */\ntemplate <typename Distance>\nint hierarchicalClustering(const Matrix<typename Distance::ElementType>& points, Matrix<typename Distance::ResultType>& centers,\n                           const KMeansIndexParams& params, Distance d = Distance())\n{\n    KMeansIndex<Distance> kmeans(points, params, d);\n    kmeans.buildIndex();\n\n    int clusterNum = kmeans.getClusterCenters(centers);\n    return clusterNum;\n}\n\n}\n#endif /* OPENCV_FLANN_BASE_HPP_ */\n"
  },
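  {
    "path": "docs/examples/flann_index_example.cpp",
    "content": "// Illustrative usage sketch for cvflann::Index from\n// opencv2/flann/flann_base.hpp. This file, its name and its data are\n// hypothetical additions for documentation purposes, not part of the\n// original sources. It builds a small index over 8 two-dimensional points\n// and queries the 3 nearest neighbours of one point. A linear (brute force)\n// index is used so the sketch needs no tuning parameters.\n#include <cstdio>\n#include \"opencv2/flann/flann_base.hpp\"\n\nint main()\n{\n    float points[16] = { 0,0,  1,0,  0,1,  1,1,  5,5,  6,5,  5,6,  6,6 };\n    cvflann::Matrix<float> dataset(points, 8, 2);\n\n    cvflann::Index< cvflann::L2<float> > index(dataset, cvflann::LinearIndexParams());\n    index.buildIndex();\n\n    float q[2] = { 0.9f, 0.9f };\n    cvflann::Matrix<float> query(q, 1, 2);\n\n    int idx[3];\n    float dst[3];\n    cvflann::Matrix<int> indices(idx, 1, 3);\n    cvflann::Matrix<float> dists(dst, 1, 3);\n\n    // L2 is a squared distance, so dists holds squared values (see dist.h).\n    index.knnSearch(query, indices, dists, 3, cvflann::SearchParams());\n    for (int i = 0; i < 3; ++i) {\n        std::printf(\"neighbour %d: index %d, squared distance %f\\n\", i, idx[i], dst[i]);\n    }\n    return 0;\n}\n"
  },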
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/general.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_GENERAL_H_\n#define OPENCV_FLANN_GENERAL_H_\n\n#include \"opencv2/core.hpp\"\n\nnamespace cvflann\n{\n\nclass FLANNException : public cv::Exception\n{\npublic:\n    FLANNException(const char* message) : cv::Exception(0, message, \"\", __FILE__, __LINE__) { }\n\n    FLANNException(const cv::String& message) : cv::Exception(0, message, \"\", __FILE__, __LINE__) { }\n};\n\n}\n\n\n#endif  /* OPENCV_FLANN_GENERAL_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/ground_truth.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_GROUND_TRUTH_H_\n#define OPENCV_FLANN_GROUND_TRUTH_H_\n\n#include \"dist.h\"\n#include \"matrix.h\"\n\n\nnamespace cvflann\n{\n\ntemplate <typename Distance>\nvoid find_nearest(const Matrix<typename Distance::ElementType>& dataset, typename Distance::ElementType* query, int* matches, int nn,\n                  int skip = 0, Distance distance = Distance())\n{\n    typedef typename Distance::ResultType DistanceType;\n    int n = nn + skip;\n\n    std::vector<int> match(n);\n    std::vector<DistanceType> dists(n);\n\n    dists[0] = distance(dataset[0], query, dataset.cols);\n    match[0] = 0;\n    int dcnt = 1;\n\n    for (size_t i=1; i<dataset.rows; ++i) {\n        DistanceType tmp = distance(dataset[i], query, dataset.cols);\n\n        if (dcnt<n) {\n            match[dcnt] = (int)i;\n            dists[dcnt++] = tmp;\n        }\n        else if (tmp < dists[dcnt-1]) {\n            dists[dcnt-1] = tmp;\n            match[dcnt-1] = (int)i;\n        }\n\n        int j = dcnt-1;\n        // bubble up\n        while (j>=1 && dists[j]<dists[j-1]) {\n            std::swap(dists[j],dists[j-1]);\n            std::swap(match[j],match[j-1]);\n            j--;\n        }\n    }\n\n    for (int i=0; i<nn; ++i) {\n        matches[i] = match[i+skip];\n    }\n}\n\n\ntemplate <typename Distance>\nvoid compute_ground_truth(const Matrix<typename Distance::ElementType>& dataset, const Matrix<typename Distance::ElementType>& testset, Matrix<int>& matches,\n                          int skip=0, Distance d = Distance())\n{\n    for (size_t i=0; i<testset.rows; ++i) {\n        find_nearest<Distance>(dataset, testset[i], matches[i], (int)matches.cols, skip, d);\n    }\n}\n\n\n}\n\n#endif //OPENCV_FLANN_GROUND_TRUTH_H_\n"
  },
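  {
    "path": "docs/examples/flann_ground_truth_example.cpp",
    "content": "// Illustrative usage sketch for cvflann::compute_ground_truth from\n// opencv2/flann/ground_truth.h. This file and its data are hypothetical\n// additions for documentation purposes, not part of the original sources.\n// Exact nearest neighbours computed this way are typically used as a\n// reference when measuring the precision of an approximate index.\n#include <cstdio>\n#include \"opencv2/flann/ground_truth.h\"\n\nint main()\n{\n    float data[8]  = { 0,0,  1,1,  2,2,  9,9 };\n    float tests[2] = { 1.2f, 1.2f };\n\n    cvflann::Matrix<float> dataset(data, 4, 2);\n    cvflann::Matrix<float> testset(tests, 1, 2);\n\n    int m[2];\n    cvflann::Matrix<int> matches(m, 1, 2);  // 2 exact neighbours per query\n\n    cvflann::compute_ground_truth< cvflann::L2<float> >(dataset, testset, matches);\n    std::printf(\"nearest: %d, second nearest: %d\\n\", m[0], m[1]);  // expects 1 and 2\n    return 0;\n}\n"
  },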
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/hdf5.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n\n#ifndef OPENCV_FLANN_HDF5_H_\n#define OPENCV_FLANN_HDF5_H_\n\n#include <hdf5.h>\n\n#include \"matrix.h\"\n\n\nnamespace cvflann\n{\n\nnamespace\n{\n\ntemplate<typename T>\nhid_t get_hdf5_type()\n{\n    throw FLANNException(\"Unsupported type for IO operations\");\n}\n\ntemplate<>\nhid_t get_hdf5_type<char>() { return H5T_NATIVE_CHAR; }\ntemplate<>\nhid_t get_hdf5_type<unsigned char>() { return H5T_NATIVE_UCHAR; }\ntemplate<>\nhid_t get_hdf5_type<short int>() { return H5T_NATIVE_SHORT; }\ntemplate<>\nhid_t get_hdf5_type<unsigned short int>() { return H5T_NATIVE_USHORT; }\ntemplate<>\nhid_t get_hdf5_type<int>() { return H5T_NATIVE_INT; }\ntemplate<>\nhid_t get_hdf5_type<unsigned int>() { return H5T_NATIVE_UINT; }\ntemplate<>\nhid_t get_hdf5_type<long>() { return H5T_NATIVE_LONG; }\ntemplate<>\nhid_t get_hdf5_type<unsigned long>() { return H5T_NATIVE_ULONG; }\ntemplate<>\nhid_t get_hdf5_type<float>() { return H5T_NATIVE_FLOAT; }\ntemplate<>\nhid_t get_hdf5_type<double>() { return H5T_NATIVE_DOUBLE; }\n}\n\n\n#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y));\n\ntemplate<typename T>\nvoid save_to_file(const cvflann::Matrix<T>& dataset, const String& filename, const String& name)\n{\n\n#if H5Eset_auto_vers == 2\n    H5Eset_auto( H5E_DEFAULT, NULL, NULL );\n#else\n    H5Eset_auto( NULL, NULL );\n#endif\n\n    herr_t status;\n    hid_t file_id;\n    file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);\n    if (file_id < 0) {\n        file_id = H5Fcreate(filename.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);\n    }\n    CHECK_ERROR(file_id,\"Error creating hdf5 file.\");\n\n    hsize_t     dimsf[2];              // dataset dimensions\n    dimsf[0] = dataset.rows;\n    dimsf[1] = dataset.cols;\n\n    hid_t space_id = H5Screate_simple(2, dimsf, NULL);\n    hid_t memspace_id = H5Screate_simple(2, dimsf, NULL);\n\n    hid_t dataset_id;\n#if H5Dcreate_vers 
== 2\n    dataset_id = H5Dcreate2(file_id, name.c_str(), get_hdf5_type<T>(), space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);\n#else\n    dataset_id = H5Dcreate(file_id, name.c_str(), get_hdf5_type<T>(), space_id, H5P_DEFAULT);\n#endif\n\n    if (dataset_id<0) {\n#if H5Dopen_vers == 2\n        dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);\n#else\n        dataset_id = H5Dopen(file_id, name.c_str());\n#endif\n    }\n    CHECK_ERROR(dataset_id,\"Error creating or opening dataset in file.\");\n\n    status = H5Dwrite(dataset_id, get_hdf5_type<T>(), memspace_id, space_id, H5P_DEFAULT, dataset.data );\n    CHECK_ERROR(status, \"Error writing to dataset\");\n\n    H5Sclose(memspace_id);\n    H5Sclose(space_id);\n    H5Dclose(dataset_id);\n    H5Fclose(file_id);\n\n}\n\n\ntemplate<typename T>\nvoid load_from_file(cvflann::Matrix<T>& dataset, const String& filename, const String& name)\n{\n    herr_t status;\n    hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);\n    CHECK_ERROR(file_id,\"Error opening hdf5 file.\");\n\n    hid_t dataset_id;\n#if H5Dopen_vers == 2\n    dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);\n#else\n    dataset_id = H5Dopen(file_id, name.c_str());\n#endif\n    CHECK_ERROR(dataset_id,\"Error opening dataset in file.\");\n\n    hid_t space_id = H5Dget_space(dataset_id);\n\n    hsize_t dims_out[2];\n    H5Sget_simple_extent_dims(space_id, dims_out, NULL);\n\n    dataset = cvflann::Matrix<T>(new T[dims_out[0]*dims_out[1]], dims_out[0], dims_out[1]);\n\n    status = H5Dread(dataset_id, get_hdf5_type<T>(), H5S_ALL, H5S_ALL, H5P_DEFAULT, dataset[0]);\n    CHECK_ERROR(status, \"Error reading dataset\");\n\n    H5Sclose(space_id);\n    H5Dclose(dataset_id);\n    H5Fclose(file_id);\n}\n\n\n#ifdef HAVE_MPI\n\nnamespace mpi\n{\n/**\n * Loads the hyperslice corresponding to this processor from an HDF5 file.\n * @param dataset Dataset where the data is loaded\n * @param filename HDF5 file name\n * @param name Name of dataset inside file\n */\ntemplate<typename T>\nvoid load_from_file(cvflann::Matrix<T>& dataset, const String& filename, const String& name)\n{\n    MPI_Comm comm  = MPI_COMM_WORLD;\n    MPI_Info info  = MPI_INFO_NULL;\n\n    int mpi_size, mpi_rank;\n    MPI_Comm_size(comm, &mpi_size);\n    MPI_Comm_rank(comm, &mpi_rank);\n\n    herr_t status;\n\n    hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);\n    H5Pset_fapl_mpio(plist_id, comm, info);\n    hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id);\n    CHECK_ERROR(file_id,\"Error opening hdf5 file.\");\n    H5Pclose(plist_id);\n    hid_t dataset_id;\n#if H5Dopen_vers == 2\n    dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);\n#else\n    dataset_id = H5Dopen(file_id, name.c_str());\n#endif\n    CHECK_ERROR(dataset_id,\"Error opening dataset in file.\");\n\n    hid_t space_id = H5Dget_space(dataset_id);\n    hsize_t dims[2];\n    H5Sget_simple_extent_dims(space_id, dims, NULL);\n\n    hsize_t count[2];\n    hsize_t offset[2];\n\n    hsize_t item_cnt = dims[0]/mpi_size+(dims[0]%mpi_size==0 ? 0 : 1);\n    hsize_t cnt = (mpi_rank<mpi_size-1 ? item_cnt : dims[0]-item_cnt*(mpi_size-1));\n\n    count[0] = cnt;\n    count[1] = dims[1];\n    offset[0] = mpi_rank*item_cnt;\n    offset[1] = 0;\n\n    hid_t memspace_id = H5Screate_simple(2,count,NULL);\n\n    H5Sselect_hyperslab(space_id, H5S_SELECT_SET, offset, NULL, count, NULL);\n\n    dataset.rows = count[0];\n    dataset.cols = count[1];\n    dataset.data = new T[dataset.rows*dataset.cols];\n\n    plist_id = H5Pcreate(H5P_DATASET_XFER);\n    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);\n    status = H5Dread(dataset_id, get_hdf5_type<T>(), memspace_id, space_id, plist_id, dataset.data);\n    CHECK_ERROR(status, \"Error reading dataset\");\n\n    H5Pclose(plist_id);\n    H5Sclose(space_id);\n    H5Sclose(memspace_id);\n    H5Dclose(dataset_id);\n    H5Fclose(file_id);\n}\n}\n#endif // HAVE_MPI\n} // namespace cvflann\n\n#endif /* OPENCV_FLANN_HDF5_H_ */\n"
  },
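  {
    "path": "docs/examples/flann_hdf5_example.cpp",
    "content": "// Illustrative usage sketch for save_to_file / load_from_file from\n// opencv2/flann/hdf5.h. This file, its name and the file/dataset names are\n// hypothetical additions for documentation purposes, not part of the\n// original sources. Building it requires linking against the HDF5 library.\n#include <cstdio>\n#include \"opencv2/flann/hdf5.h\"\n\nint main()\n{\n    float data[6] = { 1, 2, 3, 4, 5, 6 };\n    cvflann::Matrix<float> dataset(data, 2, 3);\n\n    // Writes the 2x3 matrix as a dataset named \"features\" into example.h5.\n    cvflann::save_to_file(dataset, \"example.h5\", \"features\");\n\n    cvflann::Matrix<float> loaded;\n    cvflann::load_from_file(loaded, \"example.h5\", \"features\");\n    std::printf(\"loaded %d x %d matrix\\n\", (int)loaded.rows, (int)loaded.cols);\n\n    delete[] loaded.data;  // load_from_file allocates the buffer with new[]\n    return 0;\n}\n"
  },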
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/heap.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_HEAP_H_\n#define OPENCV_FLANN_HEAP_H_\n\n#include <algorithm>\n#include <vector>\n\nnamespace cvflann\n{\n\n/**\n * Priority Queue Implementation\n *\n * The priority queue is implemented with a heap.  A heap is a complete\n * (full) binary tree in which each parent is less than both of its\n * children, but the order of the children is unspecified.\n */\ntemplate <typename T>\nclass Heap\n{\n\n    /**\n     * Storage array for the heap.\n     * Type T must be comparable.\n     */\n    std::vector<T> heap;\n    int length;\n\n    /**\n     * Number of element in the heap\n     */\n    int count;\n\n\n\npublic:\n    /**\n     * Constructor.\n     *\n     * Params:\n     *     sz = heap size\n     */\n\n    Heap(int sz)\n    {\n        length = sz;\n        heap.reserve(length);\n        count = 0;\n    }\n\n    /**\n     *\n     * Returns: heap size\n     */\n    int size()\n    {\n        return count;\n    }\n\n    /**\n     * Tests if the heap is empty\n     *\n     * Returns: true is heap empty, false otherwise\n     */\n    bool empty()\n    {\n        return size()==0;\n    }\n\n    /**\n     * Clears the heap.\n     */\n    void clear()\n    {\n        heap.clear();\n        count = 0;\n    }\n\n    struct CompareT\n    {\n        bool operator()(const T& t_1, const T& t_2) const\n        {\n            return t_2 < t_1;\n        }\n    };\n\n    /**\n     * Insert a new element in the heap.\n     *\n     * We select the next empty leaf node, and then keep moving any larger\n     * parents down until the right location is found to store this element.\n     *\n     * Params:\n     *     value = the new element to be inserted in the heap\n     */\n    void insert(T value)\n    {\n        /* If heap is full, then return without adding this element. 
*/\n        if (count == length) {\n            return;\n        }\n\n        heap.push_back(value);\n        static CompareT compareT;\n        std::push_heap(heap.begin(), heap.end(), compareT);\n        ++count;\n    }\n\n\n\n    /**\n     * Returns the node of minimum value from the heap (top of the heap).\n     *\n     * Params:\n     *     value = out parameter used to return the min element\n     * Returns: false if heap empty\n     */\n    bool popMin(T& value)\n    {\n        if (count == 0) {\n            return false;\n        }\n\n        value = heap[0];\n        static CompareT compareT;\n        std::pop_heap(heap.begin(), heap.end(), compareT);\n        heap.pop_back();\n        --count;\n\n        return true;  /* Return old last node. */\n    }\n};\n\n}\n\n#endif //OPENCV_FLANN_HEAP_H_\n"
  },
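  {
    "path": "docs/examples/flann_heap_example.cpp",
    "content": "// Illustrative usage sketch for cvflann::Heap from opencv2/flann/heap.h.\n// This file and its values are hypothetical additions for documentation\n// purposes, not part of the original sources. The heap is bounded: once sz\n// elements are stored, further insertions are silently dropped, and popMin\n// always yields the smallest element first.\n#include <cstdio>\n#include \"opencv2/flann/heap.h\"\n\nint main()\n{\n    cvflann::Heap<float> heap(4);  // capacity of 4 elements\n\n    heap.insert(3.0f);\n    heap.insert(1.0f);\n    heap.insert(2.0f);\n\n    float v;\n    while (heap.popMin(v)) {\n        std::printf(\"%f\\n\", v);    // prints 1, 2, 3 in ascending order\n    }\n    return 0;\n}\n"
  },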
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/hierarchical_clustering_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_\n#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_\n\n#include <algorithm>\n#include <map>\n#include <cassert>\n#include <limits>\n#include <cmath>\n\n#include \"general.h\"\n#include \"nn_index.h\"\n#include \"dist.h\"\n#include \"matrix.h\"\n#include \"result_set.h\"\n#include \"heap.h\"\n#include \"allocator.h\"\n#include \"random.h\"\n#include \"saving.h\"\n\n\nnamespace cvflann\n{\n\nstruct HierarchicalClusteringIndexParams : public IndexParams\n{\n    HierarchicalClusteringIndexParams(int branching = 32,\n                                      flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM,\n                                      int trees = 4, int leaf_size = 100)\n    {\n        (*this)[\"algorithm\"] = FLANN_INDEX_HIERARCHICAL;\n        // The branching factor used in the hierarchical clustering\n        (*this)[\"branching\"] = branching;\n        // Algorithm used for picking the initial cluster centers\n        (*this)[\"centers_init\"] = centers_init;\n        // number of parallel trees to build\n        (*this)[\"trees\"] = trees;\n        // maximum leaf size\n        (*this)[\"leaf_size\"] = leaf_size;\n    }\n};\n\n\n/**\n * Hierarchical index\n *\n * Contains a tree constructed through a hierarchical clustering\n * and other information for indexing a set of points for nearest-neighbour matching.\n */\ntemplate <typename Distance>\nclass HierarchicalClusteringIndex : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\nprivate:\n\n\n    typedef void (HierarchicalClusteringIndex::* centersAlgFunction)(int, int*, int, int*, int&);\n\n    /**\n     * The function used for choosing the cluster centers.\n     */\n    centersAlgFunction chooseCenters;\n\n\n\n    /**\n     
* Chooses the initial centers in the k-means clustering in a random manner.\n     *\n     * Params:\n     *     k = number of centers\n     *     vecs = the dataset of points\n     *     indices = indices in the dataset\n     *     indices_length = length of indices vector\n     *\n     */\n    void chooseCentersRandom(int k, int* dsindices, int indices_length, int* centers, int& centers_length)\n    {\n        UniqueRandom r(indices_length);\n\n        int index;\n        for (index=0; index<k; ++index) {\n            bool duplicate = true;\n            int rnd;\n            while (duplicate) {\n                duplicate = false;\n                rnd = r.next();\n                if (rnd<0) {\n                    centers_length = index;\n                    return;\n                }\n\n                centers[index] = dsindices[rnd];\n\n                for (int j=0; j<index; ++j) {\n                    DistanceType sq = distance(dataset[centers[index]], dataset[centers[j]], dataset.cols);\n                    if (sq<1e-16) {\n                        duplicate = true;\n                    }\n                }\n            }\n        }\n\n        centers_length = index;\n    }\n\n\n    /**\n     * Chooses the initial centers in the k-means using Gonzales' algorithm\n     * so that the centers are spaced apart from each other.\n     *\n     * Params:\n     *     k = number of centers\n     *     vecs = the dataset of points\n     *     indices = indices in the dataset\n     * Returns:\n     */\n    void chooseCentersGonzales(int k, int* dsindices, int indices_length, int* centers, int& centers_length)\n    {\n        int n = indices_length;\n\n        int rnd = rand_int(n);\n        assert(rnd >=0 && rnd < n);\n\n        centers[0] = dsindices[rnd];\n\n        int index;\n        for (index=1; index<k; ++index) {\n\n            int best_index = -1;\n            DistanceType best_val = 0;\n            for (int j=0; j<n; ++j) {\n                DistanceType dist = distance(dataset[centers[0]],dataset[dsindices[j]],dataset.cols);\n                for (int i=1; i<index; ++i) {\n                    DistanceType tmp_dist = distance(dataset[centers[i]],dataset[dsindices[j]],dataset.cols);\n                    if (tmp_dist<dist) {\n                        dist = tmp_dist;\n                    }\n                }\n                if (dist>best_val) {\n                    best_val = dist;\n                    best_index = j;\n                }\n            }\n            if (best_index!=-1) {\n                centers[index] = dsindices[best_index];\n            }\n            else {\n                break;\n            }\n        }\n        centers_length = index;\n    }\n\n\n    /**\n     * Chooses the initial centers in the k-means using the algorithm\n     * proposed in the KMeans++ paper:\n     * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding\n     *\n     * Implementation of this function was converted from the one provided in Arthur's code.\n     *\n     * Params:\n     *     k = number of centers\n     *     vecs = the dataset of points\n     *     indices = indices in the dataset\n     * Returns:\n     */\n    void chooseCentersKMeanspp(int k, int* dsindices, int indices_length, int* centers, int& centers_length)\n    {\n        int n = indices_length;\n\n        double currentPot = 0;\n        DistanceType* closestDistSq = new DistanceType[n];\n\n        // Choose one random center and set the closestDistSq values\n        int index = rand_int(n);\n       
 assert(index >=0 && index < n);\n        centers[0] = dsindices[index];\n\n        // Working with squared distances raises the probability of picking new centers far from\n        // the previous ones (consistent with the \"k-means++: the advantages of careful seeding\" article)\n        for (int i = 0; i < n; i++) {\n            closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols);\n            closestDistSq[i] = ensureSquareDistance<Distance>( closestDistSq[i] );\n            currentPot += closestDistSq[i];\n        }\n\n\n        const int numLocalTries = 1;\n\n        // Choose each center\n        int centerCount;\n        for (centerCount = 1; centerCount < k; centerCount++) {\n\n            // Repeat several trials\n            double bestNewPot = -1;\n            int bestNewIndex = 0;\n            for (int localTrial = 0; localTrial < numLocalTries; localTrial++) {\n\n                // Choose our center - have to be slightly careful to return a valid answer even accounting\n                // for possible rounding errors\n                double randVal = rand_double(currentPot);\n                for (index = 0; index < n-1; index++) {\n                    if (randVal <= closestDistSq[index]) break;\n                    else randVal -= closestDistSq[index];\n                }\n\n                // Compute the new potential\n                double newPot = 0;\n                for (int i = 0; i < n; i++) {\n                    DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols);\n                    newPot += std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );\n                }\n\n                // Store the best result\n                if ((bestNewPot < 0)||(newPot < bestNewPot)) {\n                    bestNewPot = newPot;\n                    bestNewIndex = index;\n                }\n            }\n\n            // Add the appropriate center\n            centers[centerCount] = dsindices[bestNewIndex];\n            currentPot = bestNewPot;\n            for (int i = 0; i < n; i++) {\n                DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols);\n                closestDistSq[i] = std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );\n            }\n        }\n\n        centers_length = centerCount;\n\n        delete[] closestDistSq;\n    }\n\n\n    /**\n     * Chooses the initial centers in a way inspired by Gonzales (by Pierre-Emmanuel Viel):\n     * select the first point of the list as a candidate, then scan the points list. If another\n     * point is further from the other centers than the current candidate, test whether it is a good\n     * center of a local aggregation. If it is, replace the current candidate with this point. And so on...\n     *\n     * Used with KMeansIndex, which computes center coordinates by averaging the positions of a cluster's points,\n     * this makes little difference compared to the previous methods. But used with the HierarchicalClusteringIndex\n     * class, which picks centers among the existing points instead of computing barycenters, there is a real\n     * improvement.\n     *\n     * Params:\n     *     k = number of centers\n     *     vecs = the dataset of points\n     *     indices = indices in the dataset\n     * Returns:\n     */\n    void GroupWiseCenterChooser(int k, int* dsindices, int indices_length, int* centers, int& centers_length)\n    {\n        const float kSpeedUpFactor = 1.3f;\n\n        int n = indices_length;\n\n        DistanceType* closestDistSq = new DistanceType[n];\n\n        // Choose one random center and set the closestDistSq values\n        int index = rand_int(n);\n        assert(index >=0 && index < n);\n        centers[0] = dsindices[index];\n\n        for (int i = 0; i < n; i++) {\n            closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols);\n        }\n\n\n        // Choose each center\n        int centerCount;\n        for (centerCount = 1; centerCount < k; centerCount++) {\n\n            // Repeat several trials\n            double bestNewPot = -1;\n            int bestNewIndex = 0;\n            DistanceType furthest = 0;\n            for (index = 0; index < n; index++) {\n\n                // Only test the potential of points further away than the current candidate\n                if( closestDistSq[index] > kSpeedUpFactor * (float)furthest ) {\n\n                    // Compute the new potential\n                    double newPot = 0;\n                    for (int i = 0; i < n; i++) {\n                        newPot += std::min( distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols)\n                                            , closestDistSq[i] );\n                    }\n\n                    // Store the best result\n                    if ((bestNewPot < 0)||(newPot <= bestNewPot)) {\n                        bestNewPot = newPot;\n                        bestNewIndex = index;\n                        furthest = closestDistSq[index];\n                    }\n                }\n            }\n\n            // Add the appropriate center\n            centers[centerCount] = dsindices[bestNewIndex];\n            for (int i = 0; i < n; i++) {\n                closestDistSq[i] = std::min( distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols)\n                                             , closestDistSq[i] );\n            }\n        }\n\n        centers_length = centerCount;\n\n        delete[] closestDistSq;\n    }\n\n\npublic:\n\n\n    /**\n     * Index constructor\n     *\n     * Params:\n     *          inputData = dataset with the input features\n     *          params = parameters passed to the hierarchical k-means algorithm\n     */\n    HierarchicalClusteringIndex(const Matrix<ElementType>& inputData, const IndexParams& index_params = HierarchicalClusteringIndexParams(),\n                                Distance d = Distance())\n        : dataset(inputData), params(index_params), root(NULL), indices(NULL), distance(d)\n    {\n        memoryCounter = 0;\n\n        size_ = dataset.rows;\n        veclen_ = dataset.cols;\n\n        branching_ = get_param(params,\"branching\",32);\n        centers_init_ = get_param(params,\"centers_init\", FLANN_CENTERS_RANDOM);\n        trees_ = get_param(params,\"trees\",4);\n        leaf_size_ = get_param(params,\"leaf_size\",100);\n\n        if (centers_init_==FLANN_CENTERS_RANDOM) {\n            chooseCenters = &HierarchicalClusteringIndex::chooseCentersRandom;\n        }\n        else if (centers_init_==FLANN_CENTERS_GONZALES) {\n            chooseCenters = &HierarchicalClusteringIndex::chooseCentersGonzales;\n        }\n        else if (centers_init_==FLANN_CENTERS_KMEANSPP) {\n            chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp;\n        }\n        else if (centers_init_==FLANN_CENTERS_GROUPWISE) {\n            chooseCenters = &HierarchicalClusteringIndex::GroupWiseCenterChooser;\n        }\n        else {\n            throw FLANNException(\"Unknown algorithm for choosing initial centers.\");\n        }\n\n        trees_ = get_param(params,\"trees\",4);\n        root = new NodePtr[trees_];\n        indices = new int*[trees_];\n\n        for (int i=0; i<trees_; ++i) {\n            root[i] = NULL;\n            indices[i] = NULL;\n        }\n    }\n\n    HierarchicalClusteringIndex(const HierarchicalClusteringIndex&);\n    HierarchicalClusteringIndex& operator=(const HierarchicalClusteringIndex&);\n\n    /**\n     * Index destructor.\n     *\n     * Release the memory used by the index.\n     */\n    virtual ~HierarchicalClusteringIndex()\n    {\n        free_elements();\n\n        if (root!=NULL) {\n            delete[] root;\n        }\n\n        if (indices!=NULL) {\n            delete[] indices;\n        }\n    }\n\n\n    /**\n     * Release the inner elements of indices[]\n     */\n    void free_elements()\n    {\n        if (indices!=NULL) {\n            for(int i=0; i<trees_; ++i) {\n                if (indices[i]!=NULL) {\n                    delete[] indices[i];\n                    indices[i] = NULL;\n                }\n            }\n        }\n    }\n\n\n    /**\n     *  Returns size of index.\n     */\n    size_t size() const\n    {\n        return size_;\n    }\n\n    /**\n     * Returns the length of an index feature.\n     */\n    size_t veclen() const\n    {\n        return veclen_;\n    }\n\n\n    /**\n     * Computes the index memory usage\n     * Returns: memory used by the index\n     */\n    int usedMemory() const\n    {\n        return pool.usedMemory+pool.wastedMemory+memoryCounter;\n    }\n\n    /**\n     * Builds the index\n     */\n    void buildIndex()\n    {\n        if (branching_<2) {\n            throw FLANNException(\"Branching factor must be at least 2\");\n        }\n\n        free_elements();\n\n        for (int i=0; i<trees_; ++i) {\n            indices[i] = new int[size_];\n            for (size_t j=0; j<size_; ++j) {\n                indices[i][j] = (int)j;\n            }\n            root[i] = pool.allocate<Node>();\n            computeClustering(root[i], indices[i], (int)size_, branching_,0);\n        }\n    }\n\n\n    flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_HIERARCHICAL;\n    }\n\n\n    void saveIndex(FILE* stream)\n    {\n        save_value(stream, branching_);\n        save_value(stream, trees_);\n        save_value(stream, centers_init_);\n        save_value(stream, leaf_size_);\n        save_value(stream, memoryCounter);\n        for (int i=0; i<trees_; ++i) {\n            save_value(stream, *indices[i], size_);\n            save_tree(stream, root[i], i);\n        }\n\n    }\n\n\n    void loadIndex(FILE* stream)\n    {\n        free_elements();\n\n        if (root!=NULL) {\n            delete[] root;\n        }\n\n        if (indices!=NULL) {\n            delete[] indices;\n        }\n\n        load_value(stream, branching_);\n        load_value(stream, trees_);\n        load_value(stream, centers_init_);\n        load_value(stream, leaf_size_);\n        load_value(stream, memoryCounter);\n\n        indices = new int*[trees_];\n        root = new NodePtr[trees_];\n        for (int i=0; i<trees_; ++i) {\n            indices[i] = new int[size_];\n            load_value(stream, *indices[i], size_);\n            load_tree(stream, root[i], i);\n        }\n\n        params[\"algorithm\"] = getType();\n        params[\"branching\"] = branching_;\n        params[\"trees\"] = trees_;\n        params[\"centers_init\"] = centers_init_;\n        params[\"leaf_size\"] = leaf_size_;\n    }\n\n\n    /**\n     * Find set of nearest neighbors to vec. Their indices are stored inside\n     * the result object.\n     *\n     * Params:\n     *     result = the result object in which the indices of the nearest-neighbors are stored\n     *     vec = the vector for which to search the nearest neighbors\n     *     searchParams = parameters that influence the search algorithm (checks)\n     */\n    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)\n    {\n\n        int maxChecks = get_param(searchParams,\"checks\",32);\n\n        // Priority queue storing intermediate branches in the best-bin-first search\n        Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);\n\n        std::vector<bool> checked(size_,false);\n        int checks = 0;\n        for (int i=0; i<trees_; ++i) {\n            findNN(root[i], result, vec, checks, maxChecks, heap, checked);\n        }\n\n        BranchSt branch;\n        while (heap->popMin(branch) && (checks<maxChecks || !result.full())) {\n            NodePtr node = branch.node;\n            findNN(node, result, vec, checks, maxChecks, heap, checked);\n        }\n        assert(result.full());\n\n        delete heap;\n\n    }\n\n    IndexParams getParameters() const\n    {\n        return params;\n    }\n\n\nprivate:\n\n    /**\n     * Structure representing a node in the hierarchical k-means tree.\n     */\n    struct Node\n    {\n        /**\n         * The cluster center index\n         */\n        int pivot;\n        /**\n         * The cluster size (number of points in the cluster)\n         */\n        int size;\n        /**\n         * Child nodes (only for non-terminal nodes)\n         */\n        Node** childs;\n        /**\n         * Node points (only for terminal nodes)\n         */\n        int* indices;\n        /**\n         * Level\n         */\n        int level;\n    };\n    typedef Node* NodePtr;\n\n\n\n    /**\n     * Alias definition for a nicer syntax.\n     */\n    typedef BranchStruct<NodePtr, DistanceType> BranchSt;\n\n\n\n    void save_tree(FILE* stream, NodePtr node, int num)\n    {\n        save_value(stream, *node);\n        if (node->childs==NULL) {\n            int indices_offset = (int)(node->indices - indices[num]);\n            save_value(stream, indices_offset);\n        }\n        else {\n            for(int i=0; i<branching_; ++i) {\n                save_tree(stream, node->childs[i], num);\n            }\n        }\n    }\n\n\n    void load_tree(FILE* stream, NodePtr& node, int num)\n    {\n        node = pool.allocate<Node>();\n        load_value(stream, *node);\n        if (node->childs==NULL) {\n            int indices_offset;\n            load_value(stream, indices_offset);\n            node->indices = indices[num] + indices_offset;\n        }\n        else {\n            node->childs = pool.allocate<NodePtr>(branching_);\n            for(int i=0; i<branching_; ++i) 
{\n                load_tree(stream, node->childs[i], num);\n            }\n        }\n    }\n\n\n\n\n    void computeLabels(int* dsindices, int indices_length,  int* centers, int centers_length, int* labels, DistanceType& cost)\n    {\n        cost = 0;\n        for (int i=0; i<indices_length; ++i) {\n            ElementType* point = dataset[dsindices[i]];\n            DistanceType dist = distance(point, dataset[centers[0]], veclen_);\n            labels[i] = 0;\n            for (int j=1; j<centers_length; ++j) {\n                DistanceType new_dist = distance(point, dataset[centers[j]], veclen_);\n                if (dist>new_dist) {\n                    labels[i] = j;\n                    dist = new_dist;\n                }\n            }\n            cost += dist;\n        }\n    }\n\n    /**\n     * The method responsible with actually doing the recursive hierarchical\n     * clustering\n     *\n     * Params:\n     *     node = the node to cluster\n     *     indices = indices of the points belonging to the current node\n     *     branching = the branching factor to use in the clustering\n     *\n     * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)\n     */\n    void computeClustering(NodePtr node, int* dsindices, int indices_length, int branching, int level)\n    {\n        node->size = indices_length;\n        node->level = level;\n\n        if (indices_length < leaf_size_) { // leaf node\n            node->indices = dsindices;\n            std::sort(node->indices,node->indices+indices_length);\n            node->childs = NULL;\n            return;\n        }\n\n        std::vector<int> centers(branching);\n        std::vector<int> labels(indices_length);\n\n        int centers_length;\n        (this->*chooseCenters)(branching, dsindices, indices_length, &centers[0], centers_length);\n\n        if (centers_length<branching) {\n            node->indices = dsindices;\n            std::sort(node->indices,node->indices+indices_length);\n            node->childs = NULL;\n            return;\n        }\n\n\n        //\tassign points to clusters\n        DistanceType cost;\n        computeLabels(dsindices, indices_length, &centers[0], centers_length, &labels[0], cost);\n\n        node->childs = pool.allocate<NodePtr>(branching);\n        int start = 0;\n        int end = start;\n        for (int i=0; i<branching; ++i) {\n            for (int j=0; j<indices_length; ++j) {\n                if (labels[j]==i) {\n                    std::swap(dsindices[j],dsindices[end]);\n                    std::swap(labels[j],labels[end]);\n                    end++;\n                }\n            }\n\n            node->childs[i] = pool.allocate<Node>();\n            node->childs[i]->pivot = centers[i];\n            node->childs[i]->indices = NULL;\n            computeClustering(node->childs[i],dsindices+start, end-start, branching, level+1);\n            start=end;\n        }\n    }\n\n\n\n    /**\n     * Performs one descent in the hierarchical k-means tree. 
The branches not\n     * visited are stored in a priority queue.\n     *\n     * Params:\n     *      node = node to explore\n     *      result = container for the k-nearest neighbors found\n     *      vec = query points\n     *      checks = how many points in the dataset have been checked so far\n     *      maxChecks = maximum dataset points to checks\n     */\n\n\n    void findNN(NodePtr node, ResultSet<DistanceType>& result, const ElementType* vec, int& checks, int maxChecks,\n                Heap<BranchSt>* heap, std::vector<bool>& checked)\n    {\n        if (node->childs==NULL) {\n            if (checks>=maxChecks) {\n                if (result.full()) return;\n            }\n            for (int i=0; i<node->size; ++i) {\n                int index = node->indices[i];\n                if (!checked[index]) {\n                    DistanceType dist = distance(dataset[index], vec, veclen_);\n                    result.addPoint(dist, index);\n                    checked[index] = true;\n                    ++checks;\n                }\n            }\n        }\n        else {\n            DistanceType* domain_distances = new DistanceType[branching_];\n            int best_index = 0;\n            domain_distances[best_index] = distance(vec, dataset[node->childs[best_index]->pivot], veclen_);\n            for (int i=1; i<branching_; ++i) {\n                domain_distances[i] = distance(vec, dataset[node->childs[i]->pivot], veclen_);\n                if (domain_distances[i]<domain_distances[best_index]) {\n                    best_index = i;\n                }\n            }\n            for (int i=0; i<branching_; ++i) {\n                if (i!=best_index) {\n                    heap->insert(BranchSt(node->childs[i],domain_distances[i]));\n                }\n            }\n            delete[] domain_distances;\n            findNN(node->childs[best_index],result,vec, checks, maxChecks, heap, checked);\n        }\n    }\n\nprivate:\n\n\n    /**\n     * The dataset used by this index\n     */\n    const Matrix<ElementType> dataset;\n\n    /**\n     * Parameters used by this index\n     */\n    IndexParams params;\n\n\n    /**\n     * Number of features in the dataset.\n     */\n    size_t size_;\n\n    /**\n     * Length of each feature.\n     */\n    size_t veclen_;\n\n    /**\n     * The root node in the tree.\n     */\n    NodePtr* root;\n\n    /**\n     *  Array of indices to vectors in the dataset.\n     */\n    int** indices;\n\n\n    /**\n     * The distance\n     */\n    Distance distance;\n\n    /**\n     * Pooled memory allocator.\n     *\n     * Using a pooled memory allocator is more efficient\n     * than allocating memory directly when there is a large\n     * number small of memory allocations.\n     */\n    PooledAllocator pool;\n\n    /**\n     * Memory occupied by the index.\n     */\n    int memoryCounter;\n\n    /** index parameters */\n    int branching_;\n    int trees_;\n    flann_centers_init_t centers_init_;\n    int leaf_size_;\n\n\n};\n\n}\n\n#endif /* OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ */\n"
  },
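For reference, a minimal driver for the `HierarchicalClusteringIndex` defined in `src/3rdparty/opencv/include/opencv2/flann/hierarchical_clustering_index.h` above might look like the sketch below. It is not part of this repository: the include path and the choice of `cvflann::L2<float>` are assumptions, while the `buildIndex`/`KNNResultSet`/`findNeighbors`/`SearchParams` pattern is the one the bundled `index_testing.h` itself uses.

```cpp
// Hedged usage sketch (not repository code): build the hierarchical
// clustering trees over a small random dataset, then run one
// best-bin-first k-NN query.
#include <vector>
#include <cstdio>
#include "opencv2/flann/hierarchical_clustering_index.h" // assumed include path

int main()
{
    const size_t rows = 1000, cols = 8;
    std::vector<float> data(rows * cols);
    for (size_t i = 0; i < data.size(); ++i)
        data[i] = float(i % 97) * 0.1f; // synthetic feature values

    cvflann::Matrix<float> dataset(&data[0], rows, cols);
    cvflann::HierarchicalClusteringIndex<cvflann::L2<float> > index(
        dataset, cvflann::HierarchicalClusteringIndexParams(/*branching=*/32));
    index.buildIndex(); // clusters the dataset recursively into trees_ trees

    const int knn = 5;
    std::vector<int>   indices(knn);
    std::vector<float> dists(knn);
    cvflann::KNNResultSet<float> resultSet(knn);
    resultSet.init(&indices[0], &dists[0]);

    std::vector<float> query(cols, 0.5f);
    // "checks" bounds how many dataset points the best-bin-first search visits.
    index.findNeighbors(resultSet, &query[0], cvflann::SearchParams(64));

    for (int i = 0; i < knn; ++i)
        std::printf("neighbor %d: index=%d dist=%f\n", i, indices[i], dists[i]);
    return 0;
}
```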
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/index_testing.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_INDEX_TESTING_H_\n#define OPENCV_FLANN_INDEX_TESTING_H_\n\n#include <cstring>\n#include <cassert>\n#include <cmath>\n\n#include \"matrix.h\"\n#include \"nn_index.h\"\n#include \"result_set.h\"\n#include \"logger.h\"\n#include \"timer.h\"\n\n\nnamespace cvflann\n{\n\ninline int countCorrectMatches(int* neighbors, int* groundTruth, int n)\n{\n    int count = 0;\n    for (int i=0; i<n; ++i) {\n        for (int k=0; k<n; ++k) {\n            if (neighbors[i]==groundTruth[k]) {\n                count++;\n                break;\n            }\n        }\n    }\n    return count;\n}\n\n\ntemplate <typename Distance>\ntypename Distance::ResultType computeDistanceRaport(const Matrix<typename Distance::ElementType>& inputData, typename Distance::ElementType* target,\n                                                    int* neighbors, int* groundTruth, int veclen, int n, const Distance& distance)\n{\n    typedef typename Distance::ResultType DistanceType;\n\n    DistanceType ret = 0;\n    for (int i=0; i<n; ++i) {\n        DistanceType den = distance(inputData[groundTruth[i]], target, veclen);\n        DistanceType num = distance(inputData[neighbors[i]], target, veclen);\n\n        if ((den==0)&&(num==0)) {\n            ret += 1;\n        }\n        else {\n            ret += num/den;\n        }\n    }\n\n    return ret;\n}\n\ntemplate <typename Distance>\nfloat search_with_ground_truth(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,\n                               const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches, int nn, int checks,\n                               float& time, typename Distance::ResultType& dist, const Distance& distance, int skipMatches)\n{\n    typedef typename Distance::ResultType DistanceType;\n\n    if (matches.cols<size_t(nn)) {\n        
Logger::info(\"matches.cols=%d, nn=%d\\n\",matches.cols,nn);\n\n        throw FLANNException(\"Ground truth is not computed for as many neighbors as requested\");\n    }\n\n    KNNResultSet<DistanceType> resultSet(nn+skipMatches);\n    SearchParams searchParams(checks);\n\n    std::vector<int> indices(nn+skipMatches);\n    std::vector<DistanceType> dists(nn+skipMatches);\n    int* neighbors = &indices[skipMatches];\n\n    int correct = 0;\n    DistanceType distR = 0;\n    StartStopTimer t;\n    int repeats = 0;\n    while (t.value<0.2) {\n        repeats++;\n        t.start();\n        correct = 0;\n        distR = 0;\n        for (size_t i = 0; i < testData.rows; i++) {\n            resultSet.init(&indices[0], &dists[0]);\n            index.findNeighbors(resultSet, testData[i], searchParams);\n\n            correct += countCorrectMatches(neighbors,matches[i], nn);\n            distR += computeDistanceRaport<Distance>(inputData, testData[i], neighbors, matches[i], (int)testData.cols, nn, distance);\n        }\n        t.stop();\n    }\n    time = float(t.value/repeats);\n\n    float precicion = (float)correct/(nn*testData.rows);\n\n    dist = distR/(testData.rows*nn);\n\n    Logger::info(\"%8d %10.4g %10.5g %10.5g %10.5g\\n\",\n                 checks, precicion, time, 1000.0 * time / testData.rows, dist);\n\n    return precicion;\n}\n\n\ntemplate <typename Distance>\nfloat test_index_checks(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,\n                        const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,\n                        int checks, float& precision, const Distance& distance, int nn = 1, int skipMatches = 0)\n{\n    typedef typename Distance::ResultType DistanceType;\n\n    Logger::info(\"  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\\n\");\n    Logger::info(\"---------------------------------------------------------\\n\");\n\n    float time = 0;\n    DistanceType dist = 0;\n    precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, distance, skipMatches);\n\n    return time;\n}\n\ntemplate <typename Distance>\nfloat test_index_precision(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,\n                           const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,\n                           float precision, int& checks, const Distance& distance, int nn = 1, int skipMatches = 0)\n{\n    typedef typename Distance::ResultType DistanceType;\n    const float SEARCH_EPS = 0.001f;\n\n    Logger::info(\"  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\\n\");\n    Logger::info(\"---------------------------------------------------------\\n\");\n\n    int c2 = 1;\n    float p2;\n    int c1 = 1;\n    //float p1;\n    float time;\n    DistanceType dist;\n\n    p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);\n\n    if (p2>precision) {\n        Logger::info(\"Got as close as I can\\n\");\n        checks = c2;\n        return time;\n    }\n\n    while (p2<precision) {\n        c1 = c2;\n        //p1 = p2;\n        c2 *=2;\n        p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);\n    }\n\n    int cx;\n    float realPrecision;\n    if (fabs(p2-precision)>SEARCH_EPS) {\n        Logger::info(\"Start linear estimation\\n\");\n        // after we got to values in the vecinity 
of the desired precision\n        // use linear approximation get a better estimation\n\n        cx = (c1+c2)/2;\n        realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);\n        while (fabs(realPrecision-precision)>SEARCH_EPS) {\n\n            if (realPrecision<precision) {\n                c1 = cx;\n            }\n            else {\n                c2 = cx;\n            }\n            cx = (c1+c2)/2;\n            if (cx==c1) {\n                Logger::info(\"Got as close as I can\\n\");\n                break;\n            }\n            realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);\n        }\n\n        c2 = cx;\n        p2 = realPrecision;\n\n    }\n    else {\n        Logger::info(\"No need for linear estimation\\n\");\n        cx = c2;\n        realPrecision = p2;\n    }\n\n    checks = cx;\n    return time;\n}\n\n\ntemplate <typename Distance>\nvoid test_index_precisions(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,\n                           const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,\n                           float* precisions, int precisions_length, const Distance& distance, int nn = 1, int skipMatches = 0, float maxTime = 0)\n{\n    typedef typename Distance::ResultType DistanceType;\n\n    const float SEARCH_EPS = 0.001;\n\n    // make sure precisions array is sorted\n    std::sort(precisions, precisions+precisions_length);\n\n    int pindex = 0;\n    float precision = precisions[pindex];\n\n    Logger::info(\"  Nodes  Precision(%)   Time(s)   Time/vec(ms)  Mean dist\\n\");\n    Logger::info(\"---------------------------------------------------------\\n\");\n\n    int c2 = 1;\n    float p2;\n\n    int c1 = 1;\n    float p1;\n\n    float time;\n    DistanceType dist;\n\n    p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);\n\n    // if precision for 1 run down the tree is already\n    // better then some of the requested precisions, then\n    // skip those\n    while (precisions[pindex]<p2 && pindex<precisions_length) {\n        pindex++;\n    }\n\n    if (pindex==precisions_length) {\n        Logger::info(\"Got as close as I can\\n\");\n        return;\n    }\n\n    for (int i=pindex; i<precisions_length; ++i) {\n\n        precision = precisions[i];\n        while (p2<precision) {\n            c1 = c2;\n            p1 = p2;\n            c2 *=2;\n            p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);\n            if ((maxTime> 0)&&(time > maxTime)&&(p2<precision)) return;\n        }\n\n        int cx;\n        float realPrecision;\n        if (fabs(p2-precision)>SEARCH_EPS) {\n            Logger::info(\"Start linear estimation\\n\");\n            // after we got to values in the vecinity of the desired precision\n            // use linear approximation get a better estimation\n\n            cx = (c1+c2)/2;\n            realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);\n            while (fabs(realPrecision-precision)>SEARCH_EPS) {\n\n                if (realPrecision<precision) {\n                    c1 = cx;\n                }\n                else {\n                    c2 = cx;\n                }\n                cx = (c1+c2)/2;\n                if 
(cx==c1) {\n                    Logger::info(\"Got as close as I can\\n\");\n                    break;\n                }\n                realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);\n            }\n\n            c2 = cx;\n            p2 = realPrecision;\n\n        }\n        else {\n            Logger::info(\"No need for linear estimation\\n\");\n            cx = c2;\n            realPrecision = p2;\n        }\n\n    }\n}\n\n}\n\n#endif //OPENCV_FLANN_INDEX_TESTING_H_\n"
  },
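The `test_index_precision` routine in `index_testing.h` above tunes the `checks` parameter in two phases: it doubles `checks` until the measured precision passes the target, then bisects between the last two values. The self-contained sketch below distills just that control flow; the synthetic `precisionAt()` is a hypothetical stand-in for the real `search_with_ground_truth()` measurement, chosen only because it is monotone in `checks`.

```cpp
// Hedged, standalone sketch (not repository code) of the doubling-then-
// bisection search over "checks" used by test_index_precision.
#include <cmath>
#include <cstdio>

// Hypothetical stand-in: precision grows monotonically with checks.
static float precisionAt(int checks)
{
    return 1.0f - std::exp(-checks / 300.0f);
}

int main()
{
    const float target = 0.9f;
    const float SEARCH_EPS = 0.001f; // same tolerance as index_testing.h

    // Phase 1: exponential growth until the target precision is exceeded.
    int c1 = 1, c2 = 1;
    float p2 = precisionAt(c2);
    while (p2 < target) {
        c1 = c2;
        c2 *= 2;
        p2 = precisionAt(c2);
    }

    // Phase 2: bisect [c1, c2] until the precision is within SEARCH_EPS
    // of the target, or the integer interval can no longer shrink.
    int cx = c2;
    if (std::fabs(p2 - target) > SEARCH_EPS) {
        cx = (c1 + c2) / 2;
        float p = precisionAt(cx);
        while (std::fabs(p - target) > SEARCH_EPS) {
            if (p < target) c1 = cx;
            else            c2 = cx;
            cx = (c1 + c2) / 2;
            if (cx == c1) break; // "Got as close as I can"
            p = precisionAt(cx);
        }
    }

    std::printf("use checks=%d (precision ~ %.4f)\n", cx, precisionAt(cx));
    return 0;
}
```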
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/kdtree_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_KDTREE_INDEX_H_\n#define OPENCV_FLANN_KDTREE_INDEX_H_\n\n#include <algorithm>\n#include <map>\n#include <cassert>\n#include <cstring>\n\n#include \"general.h\"\n#include \"nn_index.h\"\n#include \"dynamic_bitset.h\"\n#include \"matrix.h\"\n#include \"result_set.h\"\n#include \"heap.h\"\n#include \"allocator.h\"\n#include \"random.h\"\n#include \"saving.h\"\n\n\nnamespace cvflann\n{\n\nstruct KDTreeIndexParams : public IndexParams\n{\n    KDTreeIndexParams(int trees = 4)\n    {\n        (*this)[\"algorithm\"] = FLANN_INDEX_KDTREE;\n        (*this)[\"trees\"] = trees;\n    }\n};\n\n\n/**\n * Randomized kd-tree index\n *\n * Contains the k-d trees and other information for indexing a set of points\n * for nearest-neighbor matching.\n */\ntemplate <typename Distance>\nclass KDTreeIndex : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n\n    /**\n     * KDTree constructor\n     *\n     * Params:\n     *          inputData = dataset with the input features\n     *          params = parameters passed to the kdtree algorithm\n     */\n    KDTreeIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KDTreeIndexParams(),\n                Distance d = Distance() ) :\n        dataset_(inputData), index_params_(params), distance_(d)\n    {\n        size_ = dataset_.rows;\n        veclen_ = dataset_.cols;\n\n        trees_ = get_param(index_params_,\"trees\",4);\n        tree_roots_ = new NodePtr[trees_];\n\n        // Create a permutable array of indices to the input vectors.\n        vind_.resize(size_);\n        for (size_t i = 0; i < size_; ++i) {\n            vind_[i] = int(i);\n        }\n\n        mean_ = new DistanceType[veclen_];\n        var_ = new DistanceType[veclen_];\n    }\n\n\n    KDTreeIndex(const 
KDTreeIndex&);\n    KDTreeIndex& operator=(const KDTreeIndex&);\n\n    /**\n     * Standard destructor\n     */\n    ~KDTreeIndex()\n    {\n        if (tree_roots_!=NULL) {\n            delete[] tree_roots_;\n        }\n        delete[] mean_;\n        delete[] var_;\n    }\n\n    /**\n     * Builds the index\n     */\n    void buildIndex()\n    {\n        /* Construct the randomized trees. */\n        for (int i = 0; i < trees_; i++) {\n            /* Randomize the order of vectors to allow for unbiased sampling. */\n            std::random_shuffle(vind_.begin(), vind_.end());\n            tree_roots_[i] = divideTree(&vind_[0], int(size_) );\n        }\n    }\n\n\n    flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_KDTREE;\n    }\n\n\n    void saveIndex(FILE* stream)\n    {\n        save_value(stream, trees_);\n        for (int i=0; i<trees_; ++i) {\n            save_tree(stream, tree_roots_[i]);\n        }\n    }\n\n\n\n    void loadIndex(FILE* stream)\n    {\n        load_value(stream, trees_);\n        if (tree_roots_!=NULL) {\n            delete[] tree_roots_;\n        }\n        tree_roots_ = new NodePtr[trees_];\n        for (int i=0; i<trees_; ++i) {\n            load_tree(stream,tree_roots_[i]);\n        }\n\n        index_params_[\"algorithm\"] = getType();\n        index_params_[\"trees\"] = tree_roots_;\n    }\n\n    /**\n     *  Returns size of index.\n     */\n    size_t size() const\n    {\n        return size_;\n    }\n\n    /**\n     * Returns the length of an index feature.\n     */\n    size_t veclen() const\n    {\n        return veclen_;\n    }\n\n    /**\n     * Computes the inde memory usage\n     * Returns: memory used by the index\n     */\n    int usedMemory() const\n    {\n        return int(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int));  // pool memory and vind array memory\n    }\n\n    /**\n     * Find set of nearest neighbors to vec. 
Their indices are stored inside\n     * the result object.\n     *\n     * Params:\n     *     result = the result object in which the indices of the nearest-neighbors are stored\n     *     vec = the vector for which to search the nearest neighbors\n     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)\n     */\n    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)\n    {\n        int maxChecks = get_param(searchParams,\"checks\", 32);\n        float epsError = 1+get_param(searchParams,\"eps\",0.0f);\n\n        if (maxChecks==FLANN_CHECKS_UNLIMITED) {\n            getExactNeighbors(result, vec, epsError);\n        }\n        else {\n            getNeighbors(result, vec, maxChecks, epsError);\n        }\n    }\n\n    IndexParams getParameters() const\n    {\n        return index_params_;\n    }\n\nprivate:\n\n\n    /*--------------------- Internal Data Structures --------------------------*/\n    struct Node\n    {\n        /**\n         * Dimension used for subdivision.\n         */\n        int divfeat;\n        /**\n         * The values used for subdivision.\n         */\n        DistanceType divval;\n        /**\n         * The child nodes.\n         */\n        Node* child1, * child2;\n    };\n    typedef Node* NodePtr;\n    typedef BranchStruct<NodePtr, DistanceType> BranchSt;\n    typedef BranchSt* Branch;\n\n\n\n    void save_tree(FILE* stream, NodePtr tree)\n    {\n        save_value(stream, *tree);\n        if (tree->child1!=NULL) {\n            save_tree(stream, tree->child1);\n        }\n        if (tree->child2!=NULL) {\n            save_tree(stream, tree->child2);\n        }\n    }\n\n\n    void load_tree(FILE* stream, NodePtr& tree)\n    {\n        tree = pool_.allocate<Node>();\n        load_value(stream, *tree);\n        if (tree->child1!=NULL) {\n            load_tree(stream, tree->child1);\n        }\n        if (tree->child2!=NULL) {\n            load_tree(stream, tree->child2);\n        }\n    }\n\n\n    /**\n     * Create a tree node that subdivides the list of vecs from vind[first]\n     * to vind[last].  The routine is called recursively on each sublist.\n     * Place a pointer to this new tree node in the location pTree.\n     *\n     * Params: pTree = the new node to create\n     *                  first = index of the first vector\n     *                  last = index of the last vector\n     */\n    NodePtr divideTree(int* ind, int count)\n    {\n        NodePtr node = pool_.allocate<Node>(); // allocate memory\n\n        /* If too few exemplars remain, then make this a leaf node. */\n        if ( count == 1) {\n            node->child1 = node->child2 = NULL;    /* Mark as leaf node. */\n            node->divfeat = *ind;    /* Store index of this vec. 
*/\n        }\n        else {\n            int idx;\n            int cutfeat;\n            DistanceType cutval;\n            meanSplit(ind, count, idx, cutfeat, cutval);\n\n            node->divfeat = cutfeat;\n            node->divval = cutval;\n            node->child1 = divideTree(ind, idx);\n            node->child2 = divideTree(ind+idx, count-idx);\n        }\n\n        return node;\n    }\n\n\n    /**\n     * Choose which feature to use in order to subdivide this set of vectors.\n     * Make a random choice among those with the highest variance, and use\n     * its variance as the threshold value.\n     */\n    void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval)\n    {\n        memset(mean_,0,veclen_*sizeof(DistanceType));\n        memset(var_,0,veclen_*sizeof(DistanceType));\n\n        /* Compute mean values.  Only the first SAMPLE_MEAN values need to be\n            sampled to get a good estimate.\n         */\n        int cnt = std::min((int)SAMPLE_MEAN+1, count);\n        for (int j = 0; j < cnt; ++j) {\n            ElementType* v = dataset_[ind[j]];\n            for (size_t k=0; k<veclen_; ++k) {\n                mean_[k] += v[k];\n            }\n        }\n        for (size_t k=0; k<veclen_; ++k) {\n            mean_[k] /= cnt;\n        }\n\n        /* Compute variances (no need to divide by count). */\n        for (int j = 0; j < cnt; ++j) {\n            ElementType* v = dataset_[ind[j]];\n            for (size_t k=0; k<veclen_; ++k) {\n                DistanceType dist = v[k] - mean_[k];\n                var_[k] += dist * dist;\n            }\n        }\n        /* Select one of the highest variance indices at random. */\n        cutfeat = selectDivision(var_);\n        cutval = mean_[cutfeat];\n\n        int lim1, lim2;\n        planeSplit(ind, count, cutfeat, cutval, lim1, lim2);\n\n        if (lim1>count/2) index = lim1;\n        else if (lim2<count/2) index = lim2;\n        else index = count/2;\n\n        /* If either list is empty, it means that all remaining features\n         * are identical. Split in the middle to maintain a balanced tree.\n         */\n        if ((lim1==count)||(lim2==0)) index = count/2;\n    }\n\n\n    /**\n     * Select the top RAND_DIM largest values from v and return the index of\n     * one of these selected at random.\n     */\n    int selectDivision(DistanceType* v)\n    {\n        int num = 0;\n        size_t topind[RAND_DIM];\n\n        /* Create a list of the indices of the top RAND_DIM values. */\n        for (size_t i = 0; i < veclen_; ++i) {\n            if ((num < RAND_DIM)||(v[i] > v[topind[num-1]])) {\n                /* Put this element at end of topind. */\n                if (num < RAND_DIM) {\n                    topind[num++] = i;            /* Add to list. */\n                }\n                else {\n                    topind[num-1] = i;         /* Replace last element. */\n                }\n                /* Bubble end value down to right location by repeated swapping. */\n                int j = num - 1;\n                while (j > 0  &&  v[topind[j]] > v[topind[j-1]]) {\n                    std::swap(topind[j], topind[j-1]);\n                    --j;\n                }\n            }\n        }\n        /* Select a random integer in range [0,num-1], and return that index. 
*/\n        int rnd = rand_int(num);\n        return (int)topind[rnd];\n    }\n\n\n    /**\n     *  Subdivide the list of points by a plane perpendicular on axe corresponding\n     *  to the 'cutfeat' dimension at 'cutval' position.\n     *\n     *  On return:\n     *  dataset[ind[0..lim1-1]][cutfeat]<cutval\n     *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval\n     *  dataset[ind[lim2..count]][cutfeat]>cutval\n     */\n    void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)\n    {\n        /* Move vector indices for left subtree to front of list. */\n        int left = 0;\n        int right = count-1;\n        for (;; ) {\n            while (left<=right && dataset_[ind[left]][cutfeat]<cutval) ++left;\n            while (left<=right && dataset_[ind[right]][cutfeat]>=cutval) --right;\n            if (left>right) break;\n            std::swap(ind[left], ind[right]); ++left; --right;\n        }\n        lim1 = left;\n        right = count-1;\n        for (;; ) {\n            while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left;\n            while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right;\n            if (left>right) break;\n            std::swap(ind[left], ind[right]); ++left; --right;\n        }\n        lim2 = left;\n    }\n\n    /**\n     * Performs an exact nearest neighbor search. The exact search performs a full\n     * traversal of the tree.\n     */\n    void getExactNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, float epsError)\n    {\n        //\t\tcheckID -= 1;  /* Set a different unique ID for each search. */\n\n        if (trees_ > 1) {\n            fprintf(stderr,\"It doesn't make any sense to use more than one tree for exact search\");\n        }\n        if (trees_>0) {\n            searchLevelExact(result, vec, tree_roots_[0], 0.0, epsError);\n        }\n        assert(result.full());\n    }\n\n    /**\n     * Performs the approximate nearest-neighbor search. The search is approximate\n     * because the tree traversal is abandoned after a given number of descends in\n     * the tree.\n     */\n    void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError)\n    {\n        int i;\n        BranchSt branch;\n\n        int checkCount = 0;\n        Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);\n        DynamicBitset checked(size_);\n\n        /* Search once through each tree down to root. */\n        for (i = 0; i < trees_; ++i) {\n            searchLevel(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);\n        }\n\n        /* Keep searching other branches from heap until finished. */\n        while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {\n            searchLevel(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);\n        }\n\n        delete heap;\n\n        assert(result.full());\n    }\n\n\n    /**\n     *  Search starting from a given node of the tree.  
Based on any mismatches at\n     *  higher levels, all exemplars below this level must have a distance of\n     *  at least \"mindistsq\".\n     */\n    void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck,\n                     float epsError, Heap<BranchSt>* heap, DynamicBitset& checked)\n    {\n        if (result_set.worstDist()<mindist) {\n            //\t\t\tprintf(\"Ignoring branch, too far\\n\");\n            return;\n        }\n\n        /* If this is a leaf node, then do check and return. */\n        if ((node->child1 == NULL)&&(node->child2 == NULL)) {\n            /*  Do not check same node more than once when searching multiple trees.\n                Once a vector is checked, we set its location in vind to the\n                current checkID.\n             */\n            int index = node->divfeat;\n            if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return;\n            checked.set(index);\n            checkCount++;\n\n            DistanceType dist = distance_(dataset_[index], vec, veclen_);\n            result_set.addPoint(dist,index);\n\n            return;\n        }\n\n        /* Which child branch should be taken first? */\n        ElementType val = vec[node->divfeat];\n        DistanceType diff = val - node->divval;\n        NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;\n        NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;\n\n        /* Create a branch record for the branch not taken.  Add distance\n            of this feature boundary (we don't attempt to correct for any\n            use of this feature in a parent node, which is unlikely to\n            happen and would have only a small effect).  Don't bother\n            adding more branches to heap after halfway point, as cost of\n            adding exceeds their value.\n         */\n\n        DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);\n        //\t\tif (2 * checkCount < maxCheck  ||  !result.full()) {\n        if ((new_distsq*epsError < result_set.worstDist())||  !result_set.full()) {\n            heap->insert( BranchSt(otherChild, new_distsq) );\n        }\n\n        /* Call recursively to search next level down. */\n        searchLevel(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked);\n    }\n\n    /**\n     * Performs an exact search in the tree starting from a node.\n     */\n    void searchLevelExact(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError)\n    {\n        /* If this is a leaf node, then do check and return. */\n        if ((node->child1 == NULL)&&(node->child2 == NULL)) {\n            int index = node->divfeat;\n            DistanceType dist = distance_(dataset_[index], vec, veclen_);\n            result_set.addPoint(dist,index);\n            return;\n        }\n\n        /* Which child branch should be taken first? */\n        ElementType val = vec[node->divfeat];\n        DistanceType diff = val - node->divval;\n        NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;\n        NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;\n\n        /* Create a branch record for the branch not taken.  
Add distance\n            of this feature boundary (we don't attempt to correct for any\n            use of this feature in a parent node, which is unlikely to\n            happen and would have only a small effect).  Don't bother\n            adding more branches to heap after halfway point, as cost of\n            adding exceeds their value.\n         */\n\n        DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);\n\n        /* Call recursively to search next level down. */\n        searchLevelExact(result_set, vec, bestChild, mindist, epsError);\n\n        if (new_distsq*epsError<=result_set.worstDist()) {\n            searchLevelExact(result_set, vec, otherChild, new_distsq, epsError);\n        }\n    }\n\n\nprivate:\n\n    enum\n    {\n        /**\n         * To improve efficiency, only SAMPLE_MEAN random values are used to\n         * compute the mean and variance at each level when building a tree.\n         * A value of 100 seems to perform as well as using all values.\n         */\n        SAMPLE_MEAN = 100,\n        /**\n         * Top random dimensions to consider\n         *\n         * When creating random trees, the dimension on which to subdivide is\n         * selected at random from among the top RAND_DIM dimensions with the\n         * highest variance.  A value of 5 works well.\n         */\n        RAND_DIM=5\n    };\n\n\n    /**\n     * Number of randomized trees that are used\n     */\n    int trees_;\n\n    /**\n     *  Array of indices to vectors in the dataset.\n     */\n    std::vector<int> vind_;\n\n    /**\n     * The dataset used by this index\n     */\n    const Matrix<ElementType> dataset_;\n\n    IndexParams index_params_;\n\n    size_t size_;\n    size_t veclen_;\n\n\n    DistanceType* mean_;\n    DistanceType* var_;\n\n\n    /**\n     * Array of k-d trees used to find neighbours.\n     */\n    NodePtr* tree_roots_;\n\n    /**\n     * Pooled memory allocator.\n     *\n     * Using a pooled memory allocator is more efficient\n     * than allocating memory directly when there is a large\n     * number small of memory allocations.\n     */\n    PooledAllocator pool_;\n\n    Distance distance_;\n\n\n};   // class KDTreeForest\n\n}\n\n#endif //OPENCV_FLANN_KDTREE_INDEX_H_\n"
  },
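The `planeSplit()` helper in `kdtree_index.h` above documents three postconditions: after the two passes, indices with values below `cutval` come first, then those equal to it, then those above. The sketch below is a one-dimensional, standalone rendering of that same two-pass partition (not repository code; the real routine indexes `dataset_[ind[i]][cutfeat]` rather than a flat array), included so the `lim1`/`lim2` invariants can be seen on concrete data.

```cpp
// Hedged standalone sketch (not repository code) of the two-pass partition
// performed by planeSplit(). Postconditions:
//   values[ind[0..lim1-1]]    <  cutval
//   values[ind[lim1..lim2-1]] == cutval
//   values[ind[lim2..count-1]] > cutval
#include <algorithm>
#include <cstdio>

static void planeSplit1D(const float* values, int* ind, int count,
                         float cutval, int& lim1, int& lim2)
{
    // Pass 1: move indices whose value is < cutval to the front.
    int left = 0, right = count - 1;
    for (;;) {
        while (left <= right && values[ind[left]]  <  cutval) ++left;
        while (left <= right && values[ind[right]] >= cutval) --right;
        if (left > right) break;
        std::swap(ind[left], ind[right]); ++left; --right;
    }
    lim1 = left;
    // Pass 2: among the remainder, move values == cutval before values > cutval.
    right = count - 1;
    for (;;) {
        while (left <= right && values[ind[left]]  <= cutval) ++left;
        while (left <= right && values[ind[right]] >  cutval) --right;
        if (left > right) break;
        std::swap(ind[left], ind[right]); ++left; --right;
    }
    lim2 = left;
}

int main()
{
    float values[] = { 3.f, 1.f, 2.f, 2.f, 5.f, 0.f, 2.f, 4.f };
    int   ind[]    = { 0, 1, 2, 3, 4, 5, 6, 7 };
    int lim1, lim2;
    planeSplit1D(values, ind, 8, 2.f, lim1, lim2);
    // Expected: lim1=2, lim2=5, i.e. {0,1} | {2,2,2} | {3,5,4} in some order.
    std::printf("lim1=%d lim2=%d order:", lim1, lim2);
    for (int i = 0; i < 8; ++i) std::printf(" %g", values[ind[i]]);
    std::printf("\n");
    return 0;
}
```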
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/kdtree_single_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_\n#define OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_\n\n#include <algorithm>\n#include <map>\n#include <cassert>\n#include <cstring>\n\n#include \"general.h\"\n#include \"nn_index.h\"\n#include \"matrix.h\"\n#include \"result_set.h\"\n#include \"heap.h\"\n#include \"allocator.h\"\n#include \"random.h\"\n#include \"saving.h\"\n\nnamespace cvflann\n{\n\nstruct KDTreeSingleIndexParams : public IndexParams\n{\n    KDTreeSingleIndexParams(int leaf_max_size = 10, bool reorder = true, int dim = -1)\n    {\n        (*this)[\"algorithm\"] = FLANN_INDEX_KDTREE_SINGLE;\n        (*this)[\"leaf_max_size\"] = leaf_max_size;\n        (*this)[\"reorder\"] = reorder;\n        (*this)[\"dim\"] = dim;\n    }\n};\n\n\n/**\n * Randomized kd-tree index\n *\n * Contains the k-d trees and other information for indexing a set of points\n * for nearest-neighbor matching.\n */\ntemplate <typename Distance>\nclass KDTreeSingleIndex : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n\n    /**\n     * KDTree constructor\n     *\n     * Params:\n     *          inputData = dataset with the input features\n     *          params = parameters passed to the kdtree algorithm\n     */\n    KDTreeSingleIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KDTreeSingleIndexParams(),\n                      Distance d = Distance() ) :\n        dataset_(inputData), index_params_(params), distance_(d)\n    {\n        size_ = dataset_.rows;\n        dim_ = dataset_.cols;\n        int dim_param = get_param(params,\"dim\",-1);\n        if (dim_param>0) dim_ = dim_param;\n        leaf_max_size_ = get_param(params,\"leaf_max_size\",10);\n        reorder_ = get_param(params,\"reorder\",true);\n\n        // Create a permutable array of 
indices to the input vectors.\n        vind_.resize(size_);\n        for (size_t i = 0; i < size_; i++) {\n            vind_[i] = (int)i;\n        }\n    }\n\n    KDTreeSingleIndex(const KDTreeSingleIndex&);\n    KDTreeSingleIndex& operator=(const KDTreeSingleIndex&);\n\n    /**\n     * Standard destructor\n     */\n    ~KDTreeSingleIndex()\n    {\n        if (reorder_) delete[] data_.data;\n    }\n\n    /**\n     * Builds the index\n     */\n    void buildIndex()\n    {\n        computeBoundingBox(root_bbox_);\n        root_node_ = divideTree(0, (int)size_, root_bbox_ );   // construct the tree\n\n        if (reorder_) {\n            delete[] data_.data;\n            data_ = cvflann::Matrix<ElementType>(new ElementType[size_*dim_], size_, dim_);\n            for (size_t i=0; i<size_; ++i) {\n                for (size_t j=0; j<dim_; ++j) {\n                    data_[i][j] = dataset_[vind_[i]][j];\n                }\n            }\n        }\n        else {\n            data_ = dataset_;\n        }\n    }\n\n    flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_KDTREE_SINGLE;\n    }\n\n\n    void saveIndex(FILE* stream)\n    {\n        save_value(stream, size_);\n        save_value(stream, dim_);\n        save_value(stream, root_bbox_);\n        save_value(stream, reorder_);\n        save_value(stream, leaf_max_size_);\n        save_value(stream, vind_);\n        if (reorder_) {\n            save_value(stream, data_);\n        }\n        save_tree(stream, root_node_);\n    }\n\n\n    void loadIndex(FILE* stream)\n    {\n        load_value(stream, size_);\n        load_value(stream, dim_);\n        load_value(stream, root_bbox_);\n        load_value(stream, reorder_);\n        load_value(stream, leaf_max_size_);\n        load_value(stream, vind_);\n        if (reorder_) {\n            load_value(stream, data_);\n        }\n        else {\n            data_ = dataset_;\n        }\n        load_tree(stream, root_node_);\n\n\n        index_params_[\"algorithm\"] = getType();\n        index_params_[\"leaf_max_size\"] = leaf_max_size_;\n        index_params_[\"reorder\"] = reorder_;\n    }\n\n    /**\n     *  Returns size of index.\n     */\n    size_t size() const\n    {\n        return size_;\n    }\n\n    /**\n     * Returns the length of an index feature.\n     */\n    size_t veclen() const\n    {\n        return dim_;\n    }\n\n    /**\n     * Computes the inde memory usage\n     * Returns: memory used by the index\n     */\n    int usedMemory() const\n    {\n        return (int)(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int));  // pool memory and vind array memory\n    }\n\n\n    /**\n     * \\brief Perform k-nearest neighbor search\n     * \\param[in] queries The query points for which to find the nearest neighbors\n     * \\param[out] indices The indices of the nearest neighbors found\n     * \\param[out] dists Distances to the nearest neighbors found\n     * \\param[in] knn Number of nearest neighbors to return\n     * \\param[in] params Search parameters\n     */\n    void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)\n    {\n        assert(queries.cols == veclen());\n        assert(indices.rows >= queries.rows);\n        assert(dists.rows >= queries.rows);\n        assert(int(indices.cols) >= knn);\n        assert(int(dists.cols) >= knn);\n\n        KNNSimpleResultSet<DistanceType> resultSet(knn);\n        for (size_t i = 0; i < queries.rows; i++) {\n            
resultSet.init(indices[i], dists[i]);\n            findNeighbors(resultSet, queries[i], params);\n        }\n    }\n\n    IndexParams getParameters() const\n    {\n        return index_params_;\n    }\n\n    /**\n     * Find set of nearest neighbors to vec. Their indices are stored inside\n     * the result object.\n     *\n     * Params:\n     *     result = the result object in which the indices of the nearest-neighbors are stored\n     *     vec = the vector for which to search the nearest neighbors\n     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)\n     */\n    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)\n    {\n        float epsError = 1+get_param(searchParams,\"eps\",0.0f);\n\n        std::vector<DistanceType> dists(dim_,0);\n        DistanceType distsq = computeInitialDistances(vec, dists);\n        searchLevel(result, vec, root_node_, distsq, dists, epsError);\n    }\n\nprivate:\n\n\n    /*--------------------- Internal Data Structures --------------------------*/\n    struct Node\n    {\n        /**\n         * Indices of points in leaf node\n         */\n        int left, right;\n        /**\n         * Dimension used for subdivision.\n         */\n        int divfeat;\n        /**\n         * The values used for subdivision.\n         */\n        DistanceType divlow, divhigh;\n        /**\n         * The child nodes.\n         */\n        Node* child1, * child2;\n    };\n    typedef Node* NodePtr;\n\n\n    struct Interval\n    {\n        DistanceType low, high;\n    };\n\n    typedef std::vector<Interval> BoundingBox;\n\n    typedef BranchStruct<NodePtr, DistanceType> BranchSt;\n    typedef BranchSt* Branch;\n\n\n\n\n    void save_tree(FILE* stream, NodePtr tree)\n    {\n        save_value(stream, *tree);\n        if (tree->child1!=NULL) {\n            save_tree(stream, tree->child1);\n        }\n        if (tree->child2!=NULL) {\n            save_tree(stream, tree->child2);\n        }\n    }\n\n\n    void load_tree(FILE* stream, NodePtr& tree)\n    {\n        tree = pool_.allocate<Node>();\n        load_value(stream, *tree);\n        if (tree->child1!=NULL) {\n            load_tree(stream, tree->child1);\n        }\n        if (tree->child2!=NULL) {\n            load_tree(stream, tree->child2);\n        }\n    }\n\n\n    void computeBoundingBox(BoundingBox& bbox)\n    {\n        bbox.resize(dim_);\n        for (size_t i=0; i<dim_; ++i) {\n            bbox[i].low = (DistanceType)dataset_[0][i];\n            bbox[i].high = (DistanceType)dataset_[0][i];\n        }\n        for (size_t k=1; k<dataset_.rows; ++k) {\n            for (size_t i=0; i<dim_; ++i) {\n                if (dataset_[k][i]<bbox[i].low) bbox[i].low = (DistanceType)dataset_[k][i];\n                if (dataset_[k][i]>bbox[i].high) bbox[i].high = (DistanceType)dataset_[k][i];\n            }\n        }\n    }\n\n\n    /**\n     * Create a tree node that subdivides the list of vecs from vind[first]\n     * to vind[last].  
The routine is called recursively on each sublist.\n     * Place a pointer to this new tree node in the location pTree.\n     *\n     * Params: pTree = the new node to create\n     *                  first = index of the first vector\n     *                  last = index of the last vector\n     */\n    NodePtr divideTree(int left, int right, BoundingBox& bbox)\n    {\n        NodePtr node = pool_.allocate<Node>(); // allocate memory\n\n        /* If too few exemplars remain, then make this a leaf node. */\n        if ( (right-left) <= leaf_max_size_) {\n            node->child1 = node->child2 = NULL;    /* Mark as leaf node. */\n            node->left = left;\n            node->right = right;\n\n            // compute bounding-box of leaf points\n            for (size_t i=0; i<dim_; ++i) {\n                bbox[i].low = (DistanceType)dataset_[vind_[left]][i];\n                bbox[i].high = (DistanceType)dataset_[vind_[left]][i];\n            }\n            for (int k=left+1; k<right; ++k) {\n                for (size_t i=0; i<dim_; ++i) {\n                    if (bbox[i].low>dataset_[vind_[k]][i]) bbox[i].low=(DistanceType)dataset_[vind_[k]][i];\n                    if (bbox[i].high<dataset_[vind_[k]][i]) bbox[i].high=(DistanceType)dataset_[vind_[k]][i];\n                }\n            }\n        }\n        else {\n            int idx;\n            int cutfeat;\n            DistanceType cutval;\n            middleSplit_(&vind_[0]+left, right-left, idx, cutfeat, cutval, bbox);\n\n            node->divfeat = cutfeat;\n\n            BoundingBox left_bbox(bbox);\n            left_bbox[cutfeat].high = cutval;\n            node->child1 = divideTree(left, left+idx, left_bbox);\n\n            BoundingBox right_bbox(bbox);\n            right_bbox[cutfeat].low = cutval;\n            node->child2 = divideTree(left+idx, right, right_bbox);\n\n            node->divlow = left_bbox[cutfeat].high;\n            node->divhigh = right_bbox[cutfeat].low;\n\n            for (size_t i=0; i<dim_; ++i) {\n                bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);\n                bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);\n            }\n        }\n\n        return node;\n    }\n\n    void computeMinMax(int* ind, int count, int dim, ElementType& min_elem, ElementType& max_elem)\n    {\n        min_elem = dataset_[ind[0]][dim];\n        max_elem = dataset_[ind[0]][dim];\n        for (int i=1; i<count; ++i) {\n            ElementType val = dataset_[ind[i]][dim];\n            if (val<min_elem) min_elem = val;\n            if (val>max_elem) max_elem = val;\n        }\n    }\n\n    void middleSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)\n    {\n        // find the largest span from the approximate bounding box\n        ElementType max_span = bbox[0].high-bbox[0].low;\n        cutfeat = 0;\n        cutval = (bbox[0].high+bbox[0].low)/2;\n        for (size_t i=1; i<dim_; ++i) {\n            ElementType span = bbox[i].high-bbox[i].low;\n            if (span>max_span) {\n                max_span = span;\n                cutfeat = i;\n                cutval = (bbox[i].high+bbox[i].low)/2;\n            }\n        }\n\n        // compute exact span on the found dimension\n        ElementType min_elem, max_elem;\n        computeMinMax(ind, count, cutfeat, min_elem, max_elem);\n        cutval = (min_elem+max_elem)/2;\n        max_span = max_elem - min_elem;\n\n        // check if a dimension of a largest span exists\n        size_t 
k = cutfeat;\n        for (size_t i=0; i<dim_; ++i) {\n            if (i==k) continue;\n            ElementType span = bbox[i].high-bbox[i].low;\n            if (span>max_span) {\n                computeMinMax(ind, count, i, min_elem, max_elem);\n                span = max_elem - min_elem;\n                if (span>max_span) {\n                    max_span = span;\n                    cutfeat = i;\n                    cutval = (min_elem+max_elem)/2;\n                }\n            }\n        }\n        int lim1, lim2;\n        planeSplit(ind, count, cutfeat, cutval, lim1, lim2);\n\n        if (lim1>count/2) index = lim1;\n        else if (lim2<count/2) index = lim2;\n        else index = count/2;\n    }\n\n\n    void middleSplit_(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)\n    {\n        const float EPS=0.00001f;\n        DistanceType max_span = bbox[0].high-bbox[0].low;\n        for (size_t i=1; i<dim_; ++i) {\n            DistanceType span = bbox[i].high-bbox[i].low;\n            if (span>max_span) {\n                max_span = span;\n            }\n        }\n        DistanceType max_spread = -1;\n        cutfeat = 0;\n        for (size_t i=0; i<dim_; ++i) {\n            DistanceType span = bbox[i].high-bbox[i].low;\n            if (span>(DistanceType)((1-EPS)*max_span)) {\n                ElementType min_elem, max_elem;\n                computeMinMax(ind, count, cutfeat, min_elem, max_elem);\n                DistanceType spread = (DistanceType)(max_elem-min_elem);\n                if (spread>max_spread) {\n                    cutfeat = (int)i;\n                    max_spread = spread;\n                }\n            }\n        }\n        // split in the middle\n        DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2;\n        ElementType min_elem, max_elem;\n        computeMinMax(ind, count, cutfeat, min_elem, max_elem);\n\n        if (split_val<min_elem) cutval = (DistanceType)min_elem;\n        else if (split_val>max_elem) cutval = (DistanceType)max_elem;\n        else cutval = split_val;\n\n        int lim1, lim2;\n        planeSplit(ind, count, cutfeat, cutval, lim1, lim2);\n\n        if (lim1>count/2) index = lim1;\n        else if (lim2<count/2) index = lim2;\n        else index = count/2;\n    }\n\n\n    /**\n     *  Subdivide the list of points by a plane perpendicular on axe corresponding\n     *  to the 'cutfeat' dimension at 'cutval' position.\n     *\n     *  On return:\n     *  dataset[ind[0..lim1-1]][cutfeat]<cutval\n     *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval\n     *  dataset[ind[lim2..count]][cutfeat]>cutval\n     */\n    void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)\n    {\n        /* Move vector indices for left subtree to front of list. */\n        int left = 0;\n        int right = count-1;\n        for (;; ) {\n            while (left<=right && dataset_[ind[left]][cutfeat]<cutval) ++left;\n            while (left<=right && dataset_[ind[right]][cutfeat]>=cutval) --right;\n            if (left>right) break;\n            std::swap(ind[left], ind[right]); ++left; --right;\n        }\n        /* If either list is empty, it means that all remaining features\n         * are identical. 
Split in the middle to maintain a balanced tree.\n         */\n        lim1 = left;\n        right = count-1;\n        for (;; ) {\n            while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left;\n            while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right;\n            if (left>right) break;\n            std::swap(ind[left], ind[right]); ++left; --right;\n        }\n        lim2 = left;\n    }\n\n    DistanceType computeInitialDistances(const ElementType* vec, std::vector<DistanceType>& dists)\n    {\n        DistanceType distsq = 0.0;\n\n        for (size_t i = 0; i < dim_; ++i) {\n            if (vec[i] < root_bbox_[i].low) {\n                dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].low, (int)i);\n                distsq += dists[i];\n            }\n            if (vec[i] > root_bbox_[i].high) {\n                dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].high, (int)i);\n                distsq += dists[i];\n            }\n        }\n\n        return distsq;\n    }\n\n    /**\n     * Performs an exact search in the tree starting from a node.\n     */\n    void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq,\n                     std::vector<DistanceType>& dists, const float epsError)\n    {\n        /* If this is a leaf node, then do check and return. */\n        if ((node->child1 == NULL)&&(node->child2 == NULL)) {\n            DistanceType worst_dist = result_set.worstDist();\n            for (int i=node->left; i<node->right; ++i) {\n                int index = reorder_ ? i : vind_[i];\n                DistanceType dist = distance_(vec, data_[index], dim_, worst_dist);\n                if (dist<worst_dist) {\n                    result_set.addPoint(dist,vind_[i]);\n                }\n            }\n            return;\n        }\n\n        /* Which child branch should be taken first? */\n        int idx = node->divfeat;\n        ElementType val = vec[idx];\n        DistanceType diff1 = val - node->divlow;\n        DistanceType diff2 = val - node->divhigh;\n\n        NodePtr bestChild;\n        NodePtr otherChild;\n        DistanceType cut_dist;\n        if ((diff1+diff2)<0) {\n            bestChild = node->child1;\n            otherChild = node->child2;\n            cut_dist = distance_.accum_dist(val, node->divhigh, idx);\n        }\n        else {\n            bestChild = node->child2;\n            otherChild = node->child1;\n            cut_dist = distance_.accum_dist( val, node->divlow, idx);\n        }\n\n        /* Call recursively to search next level down. 
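The farther branch is searched afterwards only if the updated lower bound, scaled by epsError, does not exceed the current worst distance in the result set. 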
*/\n        searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError);\n\n        DistanceType dst = dists[idx];\n        mindistsq = mindistsq + cut_dist - dst;\n        dists[idx] = cut_dist;\n        if (mindistsq*epsError<=result_set.worstDist()) {\n            searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError);\n        }\n        dists[idx] = dst;\n    }\n\nprivate:\n\n    /**\n     * The dataset used by this index\n     */\n    const Matrix<ElementType> dataset_;\n\n    IndexParams index_params_;\n\n    int leaf_max_size_;\n    bool reorder_;\n\n\n    /**\n     *  Array of indices to vectors in the dataset.\n     */\n    std::vector<int> vind_;\n\n    Matrix<ElementType> data_;\n\n    size_t size_;\n    size_t dim_;\n\n    /**\n     * The root node of the k-d tree.\n     */\n    NodePtr root_node_;\n\n    BoundingBox root_bbox_;\n\n    /**\n     * Pooled memory allocator.\n     *\n     * Using a pooled memory allocator is more efficient\n     * than allocating memory directly when there is a large\n     * number of small memory allocations.\n     */\n    PooledAllocator pool_;\n\n    Distance distance_;\n};   // class KDTreeSingleIndex\n\n}\n\n#endif //OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/kmeans_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_KMEANS_INDEX_H_\n#define OPENCV_FLANN_KMEANS_INDEX_H_\n\n#include <algorithm>\n#include <map>\n#include <cassert>\n#include <limits>\n#include <cmath>\n\n#include \"general.h\"\n#include \"nn_index.h\"\n#include \"dist.h\"\n#include \"matrix.h\"\n#include \"result_set.h\"\n#include \"heap.h\"\n#include \"allocator.h\"\n#include \"random.h\"\n#include \"saving.h\"\n#include \"logger.h\"\n\n\nnamespace cvflann\n{\n\nstruct KMeansIndexParams : public IndexParams\n{\n    KMeansIndexParams(int branching = 32, int iterations = 11,\n                      flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 )\n    {\n        (*this)[\"algorithm\"] = FLANN_INDEX_KMEANS;\n        // branching factor\n        (*this)[\"branching\"] = branching;\n        // max iterations to perform in one kmeans clustering (kmeans tree)\n        (*this)[\"iterations\"] = iterations;\n        // algorithm used for picking the initial cluster centers for kmeans tree\n        (*this)[\"centers_init\"] = centers_init;\n        // cluster boundary index. 
Used when searching the kmeans tree\n        (*this)[\"cb_index\"] = cb_index;\n    }\n};\n\n\n/**\n * Hierarchical kmeans index\n *\n * Contains a tree constructed through a hierarchical kmeans clustering\n * and other information for indexing a set of points for nearest-neighbour matching.\n */\ntemplate <typename Distance>\nclass KMeansIndex : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n\n\n    typedef void (KMeansIndex::* centersAlgFunction)(int, int*, int, int*, int&);\n\n    /**\n     * The function used for choosing the cluster centers.\n     */\n    centersAlgFunction chooseCenters;\n\n\n\n    /**\n     * Chooses the initial centers in the k-means clustering in a random manner.\n     *\n     * Params:\n     *     k = number of centers\n     *     vecs = the dataset of points\n     *     indices = indices in the dataset\n     *     indices_length = length of indices vector\n     *\n     */\n    void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length)\n    {\n        UniqueRandom r(indices_length);\n\n        int index;\n        for (index=0; index<k; ++index) {\n            bool duplicate = true;\n            int rnd;\n            while (duplicate) {\n                duplicate = false;\n                rnd = r.next();\n                if (rnd<0) {\n                    centers_length = index;\n                    return;\n                }\n\n                centers[index] = indices[rnd];\n\n                for (int j=0; j<index; ++j) {\n                    DistanceType sq = distance_(dataset_[centers[index]], dataset_[centers[j]], dataset_.cols);\n                    if (sq<1e-16) {\n                        duplicate = true;\n                    }\n                }\n            }\n        }\n\n        centers_length = index;\n    }\n\n\n    /**\n     * Chooses the initial centers in the k-means using Gonzales' algorithm\n     * so that the centers are spaced apart from each other.\n     *\n     * Params:\n     *     k = number of centers\n     *     vecs = the dataset of points\n     *     indices = indices in the dataset\n     * Returns:\n     */\n    void chooseCentersGonzales(int k, int* indices, int indices_length, int* centers, int& centers_length)\n    {\n        int n = indices_length;\n\n        int rnd = rand_int(n);\n        assert(rnd >=0 && rnd < n);\n\n        centers[0] = indices[rnd];\n\n        int index;\n        for (index=1; index<k; ++index) {\n\n            int best_index = -1;\n            DistanceType best_val = 0;\n            for (int j=0; j<n; ++j) {\n                DistanceType dist = distance_(dataset_[centers[0]],dataset_[indices[j]],dataset_.cols);\n                for (int i=1; i<index; ++i) {\n                    DistanceType tmp_dist = distance_(dataset_[centers[i]],dataset_[indices[j]],dataset_.cols);\n                    if (tmp_dist<dist) {\n                        dist = tmp_dist;\n                    }\n                }\n                if (dist>best_val) {\n                    best_val = dist;\n                    best_index = j;\n                }\n            }\n            if (best_index!=-1) {\n                centers[index] = indices[best_index];\n            }\n            else {\n                break;\n            }\n        }\n        centers_length = index;\n    }\n\n\n    /**\n     * Chooses the initial centers in the k-means using the algorithm\n     * proposed in the KMeans++ 
paper:\n     * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding\n     *\n     * Implementation of this function was converted from the one provided in Arthur's code.\n     *\n     * Params:\n     *     k = number of centers\n     *     vecs = the dataset of points\n     *     indices = indices in the dataset\n     * Returns:\n     */\n    void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length)\n    {\n        int n = indices_length;\n\n        double currentPot = 0;\n        DistanceType* closestDistSq = new DistanceType[n];\n\n        // Choose one random center and set the closestDistSq values\n        int index = rand_int(n);\n        assert(index >=0 && index < n);\n        centers[0] = indices[index];\n\n        for (int i = 0; i < n; i++) {\n            closestDistSq[i] = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols);\n            closestDistSq[i] = ensureSquareDistance<Distance>( closestDistSq[i] );\n            currentPot += closestDistSq[i];\n        }\n\n\n        const int numLocalTries = 1;\n\n        // Choose each center\n        int centerCount;\n        for (centerCount = 1; centerCount < k; centerCount++) {\n\n            // Repeat several trials\n            double bestNewPot = -1;\n            int bestNewIndex = -1;\n            for (int localTrial = 0; localTrial < numLocalTries; localTrial++) {\n\n                // Choose our center - have to be slightly careful to return a valid answer even accounting\n                // for possible rounding errors\n                double randVal = rand_double(currentPot);\n                for (index = 0; index < n-1; index++) {\n                    if (randVal <= closestDistSq[index]) break;\n                    else randVal -= closestDistSq[index];\n                }\n\n                // Compute the new potential\n                double newPot = 0;\n                for (int i = 0; i < n; i++) {\n                    DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols);\n                    newPot += std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );\n                }\n\n                // Store the best result\n                if ((bestNewPot < 0)||(newPot < bestNewPot)) {\n                    bestNewPot = newPot;\n                    bestNewIndex = index;\n                }\n            }\n\n            // Add the appropriate center\n            centers[centerCount] = indices[bestNewIndex];\n            currentPot = bestNewPot;\n            for (int i = 0; i < n; i++) {\n                DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[bestNewIndex]], dataset_.cols);\n                closestDistSq[i] = std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );\n            }\n        }\n\n        centers_length = centerCount;\n\n        delete[] closestDistSq;\n    }\n\n\n\npublic:\n\n    flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_KMEANS;\n    }\n\n    class KMeansDistanceComputer : public cv::ParallelLoopBody\n    {\n    public:\n        KMeansDistanceComputer(Distance _distance, const Matrix<ElementType>& _dataset,\n            const int _branching, const int* _indices, const Matrix<double>& _dcenters, const size_t _veclen,\n            int* _count, int* _belongs_to, std::vector<DistanceType>& _radiuses, bool& _converged, cv::Mutex& _mtx)\n            : distance(_distance)\n            , dataset(_dataset)\n     
       , branching(_branching)\n            , indices(_indices)\n            , dcenters(_dcenters)\n            , veclen(_veclen)\n            , count(_count)\n            , belongs_to(_belongs_to)\n            , radiuses(_radiuses)\n            , converged(_converged)\n            , mtx(_mtx)\n        {\n        }\n\n        void operator()(const cv::Range& range) const\n        {\n            const int begin = range.start;\n            const int end = range.end;\n\n            for( int i = begin; i<end; ++i)\n            {\n                DistanceType sq_dist = distance(dataset[indices[i]], dcenters[0], veclen);\n                int new_centroid = 0;\n                for (int j=1; j<branching; ++j) {\n                    DistanceType new_sq_dist = distance(dataset[indices[i]], dcenters[j], veclen);\n                    if (sq_dist>new_sq_dist) {\n                        new_centroid = j;\n                        sq_dist = new_sq_dist;\n                    }\n                }\n                if (sq_dist > radiuses[new_centroid]) {\n                    radiuses[new_centroid] = sq_dist;\n                }\n                if (new_centroid != belongs_to[i]) {\n                    count[belongs_to[i]]--;\n                    count[new_centroid]++;\n                    belongs_to[i] = new_centroid;\n                    mtx.lock();\n                    converged = false;\n                    mtx.unlock();\n                }\n            }\n        }\n\n    private:\n        Distance distance;\n        const Matrix<ElementType>& dataset;\n        const int branching;\n        const int* indices;\n        const Matrix<double>& dcenters;\n        const size_t veclen;\n        int* count;\n        int* belongs_to;\n        std::vector<DistanceType>& radiuses;\n        bool& converged;\n        cv::Mutex& mtx;\n        KMeansDistanceComputer& operator=( const KMeansDistanceComputer & ) { return *this; }\n    };\n\n    /**\n     * Index constructor\n     *\n     * Params:\n     *          inputData = dataset with the input features\n     *          params = parameters passed to the hierarchical k-means algorithm\n     */\n    KMeansIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KMeansIndexParams(),\n                Distance d = Distance())\n        : dataset_(inputData), index_params_(params), root_(NULL), indices_(NULL), distance_(d)\n    {\n        memoryCounter_ = 0;\n\n        size_ = dataset_.rows;\n        veclen_ = dataset_.cols;\n\n        branching_ = get_param(params,\"branching\",32);\n        iterations_ = get_param(params,\"iterations\",11);\n        if (iterations_<0) {\n            iterations_ = (std::numeric_limits<int>::max)();\n        }\n        centers_init_  = get_param(params,\"centers_init\",FLANN_CENTERS_RANDOM);\n\n        if (centers_init_==FLANN_CENTERS_RANDOM) {\n            chooseCenters = &KMeansIndex::chooseCentersRandom;\n        }\n        else if (centers_init_==FLANN_CENTERS_GONZALES) {\n            chooseCenters = &KMeansIndex::chooseCentersGonzales;\n        }\n        else if (centers_init_==FLANN_CENTERS_KMEANSPP) {\n            chooseCenters = &KMeansIndex::chooseCentersKMeanspp;\n        }\n        else {\n            throw FLANNException(\"Unknown algorithm for choosing initial centers.\");\n        }\n        cb_index_ = 0.4f;\n\n    }\n\n\n    KMeansIndex(const KMeansIndex&);\n    KMeansIndex& operator=(const KMeansIndex&);\n\n\n    /**\n     * Index destructor.\n     *\n     * Release the memory used by the index.\n     */\n    
virtual ~KMeansIndex()\n    {\n        if (root_ != NULL) {\n            free_centers(root_);\n        }\n        if (indices_!=NULL) {\n            delete[] indices_;\n        }\n    }\n\n    /**\n     *  Returns size of index.\n     */\n    size_t size() const\n    {\n        return size_;\n    }\n\n    /**\n     * Returns the length of an index feature.\n     */\n    size_t veclen() const\n    {\n        return veclen_;\n    }\n\n\n    void set_cb_index( float index)\n    {\n        cb_index_ = index;\n    }\n\n    /**\n     * Computes the index memory usage\n     * Returns: memory used by the index\n     */\n    int usedMemory() const\n    {\n        return pool_.usedMemory+pool_.wastedMemory+memoryCounter_;\n    }\n\n    /**\n     * Builds the index\n     */\n    void buildIndex()\n    {\n        if (branching_<2) {\n            throw FLANNException(\"Branching factor must be at least 2\");\n        }\n\n        indices_ = new int[size_];\n        for (size_t i=0; i<size_; ++i) {\n            indices_[i] = int(i);\n        }\n\n        root_ = pool_.allocate<KMeansNode>();\n        std::memset(root_, 0, sizeof(KMeansNode));\n\n        computeNodeStatistics(root_, indices_, (int)size_);\n        computeClustering(root_, indices_, (int)size_, branching_,0);\n    }\n\n\n    void saveIndex(FILE* stream)\n    {\n        save_value(stream, branching_);\n        save_value(stream, iterations_);\n        save_value(stream, memoryCounter_);\n        save_value(stream, cb_index_);\n        save_value(stream, *indices_, (int)size_);\n\n        save_tree(stream, root_);\n    }\n\n\n    void loadIndex(FILE* stream)\n    {\n        load_value(stream, branching_);\n        load_value(stream, iterations_);\n        load_value(stream, memoryCounter_);\n        load_value(stream, cb_index_);\n        if (indices_!=NULL) {\n            delete[] indices_;\n        }\n        indices_ = new int[size_];\n        load_value(stream, *indices_, size_);\n\n        if (root_!=NULL) {\n            free_centers(root_);\n        }\n        load_tree(stream, root_);\n\n        index_params_[\"algorithm\"] = getType();\n        index_params_[\"branching\"] = branching_;\n        index_params_[\"iterations\"] = iterations_;\n        index_params_[\"centers_init\"] = centers_init_;\n        index_params_[\"cb_index\"] = cb_index_;\n\n    }\n\n\n    /**\n     * Find set of nearest neighbors to vec. 
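The search is best-bin-first: after each descent the unexplored branches are kept in a priority queue and revisited until roughly maxChecks dataset points have been examined. 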
Their indices are stored inside\n     * the result object.\n     *\n     * Params:\n     *     result = the result object in which the indices of the nearest-neighbors are stored\n     *     vec = the vector for which to search the nearest neighbors\n     *     searchParams = parameters that influence the search algorithm (checks, cb_index)\n     */\n    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)\n    {\n\n        int maxChecks = get_param(searchParams,\"checks\",32);\n\n        if (maxChecks==FLANN_CHECKS_UNLIMITED) {\n            findExactNN(root_, result, vec);\n        }\n        else {\n            // Priority queue storing intermediate branches in the best-bin-first search\n            Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);\n\n            int checks = 0;\n            findNN(root_, result, vec, checks, maxChecks, heap);\n\n            BranchSt branch;\n            while (heap->popMin(branch) && (checks<maxChecks || !result.full())) {\n                KMeansNodePtr node = branch.node;\n                findNN(node, result, vec, checks, maxChecks, heap);\n            }\n            assert(result.full());\n\n            delete heap;\n        }\n\n    }\n\n    /**\n     * Clustering function that takes a cut in the hierarchical k-means\n     * tree and returns the cluster centers of that clustering.\n     * Params:\n     *     centers = matrix in which the cluster centers are returned; its number\n     *               of rows gives the number of clusters requested\n     * Returns: number of cluster centers\n     */\n    int getClusterCenters(Matrix<DistanceType>& centers)\n    {\n        int numClusters = centers.rows;\n        if (numClusters<1) {\n            throw FLANNException(\"Number of clusters must be at least 1\");\n        }\n\n        DistanceType variance;\n        KMeansNodePtr* clusters = new KMeansNodePtr[numClusters];\n\n        int clusterCount = getMinVarianceClusters(root_, clusters, numClusters, variance);\n\n        Logger::info(\"Clusters requested: %d, returning %d\\n\",numClusters, clusterCount);\n\n        for (int i=0; i<clusterCount; ++i) {\n            DistanceType* center = clusters[i]->pivot;\n            for (size_t j=0; j<veclen_; ++j) {\n                centers[i][j] = center[j];\n            }\n        }\n        delete[] clusters;\n\n        return clusterCount;\n    }\n\n    IndexParams getParameters() const\n    {\n        return index_params_;\n    }\n\n\nprivate:\n    /**\n     * Structure representing a node in the hierarchical k-means tree.\n     */\n    struct KMeansNode\n    {\n        /**\n         * The cluster center.\n         */\n        DistanceType* pivot;\n        /**\n         * The cluster radius.\n         */\n        DistanceType radius;\n        /**\n         * The cluster mean radius.\n         */\n        DistanceType mean_radius;\n        /**\n         * The cluster variance.\n         */\n        DistanceType variance;\n        /**\n         * The cluster size (number of points in the cluster)\n         */\n        int size;\n        /**\n         * Child nodes (only for non-terminal nodes)\n         */\n        KMeansNode** childs;\n        /**\n         * Node points (only for terminal nodes)\n         */\n        int* indices;\n        /**\n         * Level\n         */\n        int level;\n    };\n    typedef KMeansNode* KMeansNodePtr;\n\n    /**\n     * Alias definition for a nicer syntax.\n     */\n    typedef BranchStruct<KMeansNodePtr, DistanceType> BranchSt;\n\n\n\n\n    void save_tree(FILE* stream, 
KMeansNodePtr node)\n    {\n        save_value(stream, *node);\n        save_value(stream, *(node->pivot), (int)veclen_);\n        if (node->childs==NULL) {\n            int indices_offset = (int)(node->indices - indices_);\n            save_value(stream, indices_offset);\n        }\n        else {\n            for(int i=0; i<branching_; ++i) {\n                save_tree(stream, node->childs[i]);\n            }\n        }\n    }\n\n\n    void load_tree(FILE* stream, KMeansNodePtr& node)\n    {\n        node = pool_.allocate<KMeansNode>();\n        load_value(stream, *node);\n        node->pivot = new DistanceType[veclen_];\n        load_value(stream, *(node->pivot), (int)veclen_);\n        if (node->childs==NULL) {\n            int indices_offset;\n            load_value(stream, indices_offset);\n            node->indices = indices_ + indices_offset;\n        }\n        else {\n            node->childs = pool_.allocate<KMeansNodePtr>(branching_);\n            for(int i=0; i<branching_; ++i) {\n                load_tree(stream, node->childs[i]);\n            }\n        }\n    }\n\n\n    /**\n     * Helper function\n     */\n    void free_centers(KMeansNodePtr node)\n    {\n        delete[] node->pivot;\n        if (node->childs!=NULL) {\n            for (int k=0; k<branching_; ++k) {\n                free_centers(node->childs[k]);\n            }\n        }\n    }\n\n    /**\n     * Computes the statistics of a node (mean, radius, variance).\n     *\n     * Params:\n     *     node = the node to use\n     *     indices = the indices of the points belonging to the node\n     */\n    void computeNodeStatistics(KMeansNodePtr node, int* indices, int indices_length)\n    {\n\n        DistanceType radius = 0;\n        DistanceType variance = 0;\n        DistanceType* mean = new DistanceType[veclen_];\n        memoryCounter_ += int(veclen_*sizeof(DistanceType));\n\n        memset(mean,0,veclen_*sizeof(DistanceType));\n\n        // accumulate statistics over the points belonging to this node\n        for (int i=0; i<indices_length; ++i) {\n            ElementType* vec = dataset_[indices[i]];\n            for (size_t j=0; j<veclen_; ++j) {\n                mean[j] += vec[j];\n            }\n            variance += distance_(vec, ZeroIterator<ElementType>(), veclen_);\n        }\n        for (size_t j=0; j<veclen_; ++j) {\n            mean[j] /= indices_length;\n        }\n        variance /= indices_length;\n        variance -= distance_(mean, ZeroIterator<ElementType>(), veclen_);\n\n        DistanceType tmp = 0;\n        for (int i=0; i<indices_length; ++i) {\n            tmp = distance_(mean, dataset_[indices[i]], veclen_);\n            if (tmp>radius) {\n                radius = tmp;\n            }\n        }\n\n        node->variance = variance;\n        node->radius = radius;\n        node->pivot = mean;\n    }\n\n\n    /**\n     * The method responsible for actually doing the recursive hierarchical\n     * clustering\n     *\n     * Params:\n     *     node = the node to cluster\n     *     indices = indices of the points belonging to the current node\n     *     branching = the branching factor to use in the clustering\n     *\n     * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)\n     */\n    void computeClustering(KMeansNodePtr node, int* indices, int indices_length, int branching, int level)\n    {\n        node->size = indices_length;\n        node->level = level;\n\n        if (indices_length < branching) {\n            node->indices = indices;\n            std::sort(node->indices,node->indices+indices_length);\n            
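// no children: this node is a leaf\n            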
node->childs = NULL;\n            return;\n        }\n\n        cv::AutoBuffer<int> centers_idx_buf(branching);\n        int* centers_idx = (int*)centers_idx_buf;\n        int centers_length;\n        (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length);\n\n        if (centers_length<branching) {\n            node->indices = indices;\n            std::sort(node->indices,node->indices+indices_length);\n            node->childs = NULL;\n            return;\n        }\n\n\n        cv::AutoBuffer<double> dcenters_buf(branching*veclen_);\n        Matrix<double> dcenters((double*)dcenters_buf,branching,veclen_);\n        for (int i=0; i<centers_length; ++i) {\n            ElementType* vec = dataset_[centers_idx[i]];\n            for (size_t k=0; k<veclen_; ++k) {\n                dcenters[i][k] = double(vec[k]);\n            }\n        }\n\n        std::vector<DistanceType> radiuses(branching);\n        cv::AutoBuffer<int> count_buf(branching);\n        int* count = (int*)count_buf;\n        for (int i=0; i<branching; ++i) {\n            radiuses[i] = 0;\n            count[i] = 0;\n        }\n\n        //\tassign points to clusters\n        cv::AutoBuffer<int> belongs_to_buf(indices_length);\n        int* belongs_to = (int*)belongs_to_buf;\n        for (int i=0; i<indices_length; ++i) {\n\n            DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_);\n            belongs_to[i] = 0;\n            for (int j=1; j<branching; ++j) {\n                DistanceType new_sq_dist = distance_(dataset_[indices[i]], dcenters[j], veclen_);\n                if (sq_dist>new_sq_dist) {\n                    belongs_to[i] = j;\n                    sq_dist = new_sq_dist;\n                }\n            }\n            if (sq_dist>radiuses[belongs_to[i]]) {\n                radiuses[belongs_to[i]] = sq_dist;\n            }\n            count[belongs_to[i]]++;\n        }\n\n        bool converged = false;\n        int iteration = 0;\n        while (!converged && iteration<iterations_) {\n            converged = true;\n            iteration++;\n\n            // compute the new cluster centers\n            for (int i=0; i<branching; ++i) {\n                memset(dcenters[i],0,sizeof(double)*veclen_);\n                radiuses[i] = 0;\n            }\n            for (int i=0; i<indices_length; ++i) {\n                ElementType* vec = dataset_[indices[i]];\n                double* center = dcenters[belongs_to[i]];\n                for (size_t k=0; k<veclen_; ++k) {\n                    center[k] += vec[k];\n                }\n            }\n            for (int i=0; i<branching; ++i) {\n                int cnt = count[i];\n                for (size_t k=0; k<veclen_; ++k) {\n                    dcenters[i][k] /= cnt;\n                }\n            }\n\n            // reassign points to clusters\n            cv::Mutex mtx;\n            KMeansDistanceComputer invoker(distance_, dataset_, branching, indices, dcenters, veclen_, count, belongs_to, radiuses, converged, mtx);\n            parallel_for_(cv::Range(0, (int)indices_length), invoker);\n\n            for (int i=0; i<branching; ++i) {\n                // if one cluster converges to an empty cluster,\n                // move an element into that cluster\n                if (count[i]==0) {\n                    int j = (i+1)%branching;\n                    while (count[j]<=1) {\n                        j = (j+1)%branching;\n                    }\n\n                    for (int k=0; k<indices_length; ++k) {\n 
                       if (belongs_to[k]==j) {\n                            // for cluster j, we move the furthest element from the center to the empty cluster i\n                            if ( distance_(dataset_[indices[k]], dcenters[j], veclen_) == radiuses[j] ) {\n                                belongs_to[k] = i;\n                                count[j]--;\n                                count[i]++;\n                                break;\n                            }\n                        }\n                    }\n                    converged = false;\n                }\n            }\n\n        }\n\n        DistanceType** centers = new DistanceType*[branching];\n\n        for (int i=0; i<branching; ++i) {\n            centers[i] = new DistanceType[veclen_];\n            memoryCounter_ += (int)(veclen_*sizeof(DistanceType));\n            for (size_t k=0; k<veclen_; ++k) {\n                centers[i][k] = (DistanceType)dcenters[i][k];\n            }\n        }\n\n\n        // compute kmeans clustering for each of the resulting clusters\n        node->childs = pool_.allocate<KMeansNodePtr>(branching);\n        int start = 0;\n        int end = start;\n        for (int c=0; c<branching; ++c) {\n            int s = count[c];\n\n            DistanceType variance = 0;\n            DistanceType mean_radius =0;\n            for (int i=0; i<indices_length; ++i) {\n                if (belongs_to[i]==c) {\n                    DistanceType d = distance_(dataset_[indices[i]], ZeroIterator<ElementType>(), veclen_);\n                    variance += d;\n                    mean_radius += sqrt(d);\n                    std::swap(indices[i],indices[end]);\n                    std::swap(belongs_to[i],belongs_to[end]);\n                    end++;\n                }\n            }\n            variance /= s;\n            mean_radius /= s;\n            variance -= distance_(centers[c], ZeroIterator<ElementType>(), veclen_);\n\n            node->childs[c] = pool_.allocate<KMeansNode>();\n            std::memset(node->childs[c], 0, sizeof(KMeansNode));\n            node->childs[c]->radius = radiuses[c];\n            node->childs[c]->pivot = centers[c];\n            node->childs[c]->variance = variance;\n            node->childs[c]->mean_radius = mean_radius;\n            computeClustering(node->childs[c],indices+start, end-start, branching, level+1);\n            start=end;\n        }\n    }\n\n\n\n    /**\n     * Performs one descent in the hierarchical k-means tree. 
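Clusters whose ball cannot contain a point closer than the current worst result are pruned. 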
The branches not\n     * visited are stored in a priority queue.\n     *\n     * Params:\n     *      node = node to explore\n     *      result = container for the k-nearest neighbors found\n     *      vec = the query point\n     *      checks = how many points in the dataset have been checked so far\n     *      maxChecks = the maximum number of dataset points to check\n     */\n\n\n    void findNN(KMeansNodePtr node, ResultSet<DistanceType>& result, const ElementType* vec, int& checks, int maxChecks,\n                Heap<BranchSt>* heap)\n    {\n        // Ignore those clusters that are too far away\n        {\n            DistanceType bsq = distance_(vec, node->pivot, veclen_);\n            DistanceType rsq = node->radius;\n            DistanceType wsq = result.worstDist();\n\n            DistanceType val = bsq-rsq-wsq;\n            DistanceType val2 = val*val-4*rsq*wsq;\n\n            //if (val>0) {\n            if ((val>0)&&(val2>0)) {\n                return;\n            }\n        }\n\n        if (node->childs==NULL) {\n            if (checks>=maxChecks) {\n                if (result.full()) return;\n            }\n            checks += node->size;\n            for (int i=0; i<node->size; ++i) {\n                int index = node->indices[i];\n                DistanceType dist = distance_(dataset_[index], vec, veclen_);\n                result.addPoint(dist, index);\n            }\n        }\n        else {\n            DistanceType* domain_distances = new DistanceType[branching_];\n            int closest_center = exploreNodeBranches(node, vec, domain_distances, heap);\n            delete[] domain_distances;\n            findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap);\n        }\n    }\n\n    /**\n     * Helper function that computes the nearest children of a node to a given query point.\n     * Params:\n     *     node = the node\n     *     q = the query point\n     *     distances = array with the distances to each child node.\n     * Returns: the index of the nearest child\n     */\n    int exploreNodeBranches(KMeansNodePtr node, const ElementType* q, DistanceType* domain_distances, Heap<BranchSt>* heap)\n    {\n\n        int best_index = 0;\n        domain_distances[best_index] = distance_(q, node->childs[best_index]->pivot, veclen_);\n        for (int i=1; i<branching_; ++i) {\n            domain_distances[i] = distance_(q, node->childs[i]->pivot, veclen_);\n            if (domain_distances[i]<domain_distances[best_index]) {\n                best_index = i;\n            }\n        }\n\n        //\t\tfloat* best_center = node->childs[best_index]->pivot;\n        for (int i=0; i<branching_; ++i) {\n            if (i != best_index) {\n                domain_distances[i] -= cb_index_*node->childs[i]->variance;\n\n                //\t\t\t\tfloat dist_to_border = getDistanceToBorder(node.childs[i].pivot,best_center,q);\n                //\t\t\t\tif (domain_distances[i]<dist_to_border) {\n                //\t\t\t\t\tdomain_distances[i] = dist_to_border;\n                //\t\t\t\t}\n                heap->insert(BranchSt(node->childs[i],domain_distances[i]));\n            }\n        }\n\n        return best_index;\n    }\n\n\n    /**\n     * Function that performs exact nearest-neighbor search by traversing the entire tree.\n     */\n    void findExactNN(KMeansNodePtr node, ResultSet<DistanceType>& result, const ElementType* vec)\n    {\n        // Ignore those clusters that are too far away\n        {\n            DistanceType bsq = distance_(vec, node->pivot, veclen_);\n            DistanceType rsq = 
node->radius;\n            DistanceType wsq = result.worstDist();\n\n            DistanceType val = bsq-rsq-wsq;\n            DistanceType val2 = val*val-4*rsq*wsq;\n\n            //                  if (val>0) {\n            if ((val>0)&&(val2>0)) {\n                return;\n            }\n        }\n\n\n        if (node->childs==NULL) {\n            for (int i=0; i<node->size; ++i) {\n                int index = node->indices[i];\n                DistanceType dist = distance_(dataset_[index], vec, veclen_);\n                result.addPoint(dist, index);\n            }\n        }\n        else {\n            int* sort_indices = new int[branching_];\n\n            getCenterOrdering(node, vec, sort_indices);\n\n            for (int i=0; i<branching_; ++i) {\n                findExactNN(node->childs[sort_indices[i]],result,vec);\n            }\n\n            delete[] sort_indices;\n        }\n    }\n\n\n    /**\n     * Helper function.\n     *\n     * It computes the order in which to traverse the child nodes of a particular node.\n     */\n    void getCenterOrdering(KMeansNodePtr node, const ElementType* q, int* sort_indices)\n    {\n        DistanceType* domain_distances = new DistanceType[branching_];\n        for (int i=0; i<branching_; ++i) {\n            DistanceType dist = distance_(q, node->childs[i]->pivot, veclen_);\n\n            int j=0;\n            while (j<i && domain_distances[j]<dist) j++;\n            for (int k=i; k>j; --k) {\n                domain_distances[k] = domain_distances[k-1];\n                sort_indices[k] = sort_indices[k-1];\n            }\n            domain_distances[j] = dist;\n            sort_indices[j] = i;\n        }\n        delete[] domain_distances;\n    }\n\n    /**\n     * Method that computes the squared distance from a query point q,\n     * lying inside the region with center c, to the border between this\n     * region and the region with center p\n     */\n    DistanceType getDistanceToBorder(DistanceType* p, DistanceType* c, DistanceType* q)\n    {\n        DistanceType sum = 0;\n        DistanceType sum2 = 0;\n\n        for (int i=0; i<veclen_; ++i) {\n            DistanceType t = c[i]-p[i];\n            sum += t*(q[i]-(c[i]+p[i])/2);\n            sum2 += t*t;\n        }\n\n        return sum*sum/sum2;\n    }\n\n\n    /**\n     * Helper function that descends in the hierarchical k-means tree by splitting those clusters that minimize\n     * the overall variance of the clustering.\n     * Params:\n     *     root = root node\n     *     clusters = array with cluster centers (return value)\n     *     varianceValue = variance of the clustering (return value)\n     * Returns: the number of clusters in the cut\n     */\n    int getMinVarianceClusters(KMeansNodePtr root, KMeansNodePtr* clusters, int clusters_length, DistanceType& varianceValue)\n    {\n        int clusterCount = 1;\n        clusters[0] = root;\n\n        DistanceType meanVariance = root->variance*root->size;\n\n        while (clusterCount<clusters_length) {\n            DistanceType minVariance = (std::numeric_limits<DistanceType>::max)();\n            int splitIndex = -1;\n\n            for (int i=0; i<clusterCount; ++i) {\n                if (clusters[i]->childs != NULL) {\n\n                    DistanceType variance = meanVariance - clusters[i]->variance*clusters[i]->size;\n\n                    for (int j=0; j<branching_; ++j) {\n                        variance += clusters[i]->childs[j]->variance*clusters[i]->childs[j]->size;\n                    }\n                    if (variance<minVariance) {\n                
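        // remember the cut that currently yields the smallest overall variance\n                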
        minVariance = variance;\n                        splitIndex = i;\n                    }\n                }\n            }\n\n            if (splitIndex==-1) break;\n            if ( (branching_+clusterCount-1) > clusters_length) break;\n\n            meanVariance = minVariance;\n\n            // split node\n            KMeansNodePtr toSplit = clusters[splitIndex];\n            clusters[splitIndex] = toSplit->childs[0];\n            for (int i=1; i<branching_; ++i) {\n                clusters[clusterCount++] = toSplit->childs[i];\n            }\n        }\n\n        varianceValue = meanVariance/root->size;\n        return clusterCount;\n    }\n\nprivate:\n    /** The branching factor used in the hierarchical k-means clustering */\n    int branching_;\n\n    /** Maximum number of iterations to use when performing k-means clustering */\n    int iterations_;\n\n    /** Algorithm for choosing the cluster centers */\n    flann_centers_init_t centers_init_;\n\n    /**\n     * Cluster border index. This is used in the tree search phase when determining\n     * the closest cluster to explore next. A zero value takes into account only\n     * the cluster centres; a value greater than zero also takes into account the\n     * variance of the clusters.\n     */\n    float cb_index_;\n\n    /**\n     * The dataset used by this index\n     */\n    const Matrix<ElementType> dataset_;\n\n    /** Index parameters */\n    IndexParams index_params_;\n\n    /**\n     * Number of features in the dataset.\n     */\n    size_t size_;\n\n    /**\n     * Length of each feature.\n     */\n    size_t veclen_;\n\n    /**\n     * The root node in the tree.\n     */\n    KMeansNodePtr root_;\n\n    /**\n     *  Array of indices to vectors in the dataset.\n     */\n    int* indices_;\n\n    /**\n     * The distance\n     */\n    Distance distance_;\n\n    /**\n     * Pooled memory allocator.\n     */\n    PooledAllocator pool_;\n\n    /**\n     * Memory occupied by the index.\n     */\n    int memoryCounter_;\n};\n\n}\n\n#endif //OPENCV_FLANN_KMEANS_INDEX_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/linear_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_LINEAR_INDEX_H_\n#define OPENCV_FLANN_LINEAR_INDEX_H_\n\n#include \"general.h\"\n#include \"nn_index.h\"\n\nnamespace cvflann\n{\n\nstruct LinearIndexParams : public IndexParams\n{\n    LinearIndexParams()\n    {\n        (* this)[\"algorithm\"] = FLANN_INDEX_LINEAR;\n    }\n};\n\ntemplate <typename Distance>\nclass LinearIndex : public NNIndex<Distance>\n{\npublic:\n\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n\n    LinearIndex(const Matrix<ElementType>& inputData, const IndexParams& params = LinearIndexParams(),\n                Distance d = Distance()) :\n        dataset_(inputData), index_params_(params), distance_(d)\n    {\n    }\n\n    LinearIndex(const LinearIndex&);\n    LinearIndex& operator=(const LinearIndex&);\n\n    flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_LINEAR;\n    }\n\n\n    size_t size() const\n    {\n        return dataset_.rows;\n    }\n\n    size_t veclen() const\n    {\n        return dataset_.cols;\n    }\n\n\n    int usedMemory() const\n    {\n        return 0;\n    }\n\n    void buildIndex()\n    {\n        /* nothing to do here for linear search */\n    }\n\n    void saveIndex(FILE*)\n    {\n        /* nothing to do here for linear search */\n    }\n\n\n    void loadIndex(FILE*)\n    {\n        /* nothing to do here for linear search */\n\n        index_params_[\"algorithm\"] = getType();\n    }\n\n    void findNeighbors(ResultSet<DistanceType>& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/)\n    {\n        ElementType* data = dataset_.data;\n        for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) {\n            DistanceType dist = distance_(data, vec, dataset_.cols);\n            resultSet.addPoint(dist, (int)i);\n        }\n    }\n\n    IndexParams 
getParameters() const\n    {\n        return index_params_;\n    }\n\nprivate:\n    /** The dataset */\n    const Matrix<ElementType> dataset_;\n    /** Index parameters */\n    IndexParams index_params_;\n    /** Index distance */\n    Distance distance_;\n\n};\n\n}\n\n#endif // OPENCV_FLANN_LINEAR_INDEX_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/logger.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_LOGGER_H\n#define OPENCV_FLANN_LOGGER_H\n\n#include <stdio.h>\n#include <stdarg.h>\n\n#include \"defines.h\"\n\n\nnamespace cvflann\n{\n\nclass Logger\n{\n    Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {}\n\n    ~Logger()\n    {\n        if ((stream!=NULL)&&(stream!=stdout)) {\n            fclose(stream);\n        }\n    }\n\n    static Logger& instance()\n    {\n        static Logger logger;\n        return logger;\n    }\n\n    void _setDestination(const char* name)\n    {\n        if (name==NULL) {\n            stream = stdout;\n        }\n        else {\n            stream = fopen(name,\"w\");\n            if (stream == NULL) {\n                stream = stdout;\n            }\n        }\n    }\n\n    int _log(int level, const char* fmt, va_list arglist)\n    {\n        if (level > logLevel ) return -1;\n        int ret = vfprintf(stream, fmt, arglist);\n        return ret;\n    }\n\npublic:\n    /**\n     * Sets the logging level. All messages with lower priority will be ignored.\n     * @param level Logging level\n     */\n    static void setLevel(int level) { instance().logLevel = level; }\n\n    /**\n     * Sets the logging destination\n     * @param name Filename or NULL for console\n     */\n    static void setDestination(const char* name) { instance()._setDestination(name); }\n\n    /**\n     * Print log message\n     * @param level Log level\n     * @param fmt Message format\n     * @return\n     */\n    static int log(int level, const char* fmt, ...)\n    {\n        va_list arglist;\n        va_start(arglist, fmt);\n        int ret = instance()._log(level,fmt,arglist);\n        va_end(arglist);\n        return ret;\n    }\n\n#define LOG_METHOD(NAME,LEVEL) \\\n    static int NAME(const char* fmt, ...) 
\\\n    { \\\n        va_list ap; \\\n        va_start(ap, fmt); \\\n        int ret = instance()._log(LEVEL, fmt, ap); \\\n        va_end(ap); \\\n        return ret; \\\n    }\n\n    LOG_METHOD(fatal, FLANN_LOG_FATAL)\n    LOG_METHOD(error, FLANN_LOG_ERROR)\n    LOG_METHOD(warn, FLANN_LOG_WARN)\n    LOG_METHOD(info, FLANN_LOG_INFO)\n\nprivate:\n    FILE* stream;\n    int logLevel;\n};\n\n}\n\n#endif //OPENCV_FLANN_LOGGER_H\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/lsh_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n/***********************************************************************\n * Author: Vincent Rabaud\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_LSH_INDEX_H_\n#define OPENCV_FLANN_LSH_INDEX_H_\n\n#include <algorithm>\n#include <cassert>\n#include <cstring>\n#include <map>\n#include <vector>\n\n#include \"general.h\"\n#include \"nn_index.h\"\n#include \"matrix.h\"\n#include \"result_set.h\"\n#include \"heap.h\"\n#include \"lsh_table.h\"\n#include \"allocator.h\"\n#include \"random.h\"\n#include \"saving.h\"\n\nnamespace cvflann\n{\n\nstruct LshIndexParams : public IndexParams\n{\n    LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2)\n    {\n        (* this)[\"algorithm\"] = FLANN_INDEX_LSH;\n        // The number of hash tables to use\n        (*this)[\"table_number\"] = table_number;\n        // The length of the key in the hash tables\n        (*this)[\"key_size\"] = key_size;\n        // Number of levels to use in multi-probe (0 for standard LSH)\n        (*this)[\"multi_probe_level\"] = multi_probe_level;\n    }\n};\n\n/**\n * Randomized kd-tree index\n *\n * Contains the k-d trees and other information for indexing a set of points\n * for nearest-neighbor matching.\n */\ntemplate<typename Distance>\nclass LshIndex : public NNIndex<Distance>\n{\npublic:\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n    /** Constructor\n     * @param input_data dataset with the input features\n     * @param params parameters passed to the LSH algorithm\n     * @param d the distance used\n     */\n    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(),\n             Distance d = Distance()) :\n        dataset_(input_data), 
index_params_(params), distance_(d)\n    {\n        // cv::flann::IndexParams sets integer params as 'int', so it is used with get_param\n        // in place of 'unsigned int'\n        table_number_ = (unsigned int)get_param<int>(index_params_,\"table_number\",12);\n        key_size_ = (unsigned int)get_param<int>(index_params_,\"key_size\",20);\n        multi_probe_level_ = (unsigned int)get_param<int>(index_params_,\"multi_probe_level\",2);\n\n        feature_size_ = (unsigned)dataset_.cols;\n        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);\n    }\n\n\n    LshIndex(const LshIndex&);\n    LshIndex& operator=(const LshIndex&);\n\n    /**\n     * Builds the index\n     */\n    void buildIndex()\n    {\n        tables_.resize(table_number_);\n        for (unsigned int i = 0; i < table_number_; ++i) {\n            lsh::LshTable<ElementType>& table = tables_[i];\n            table = lsh::LshTable<ElementType>(feature_size_, key_size_);\n\n            // Add the features to the table\n            table.add(dataset_);\n        }\n    }\n\n    flann_algorithm_t getType() const\n    {\n        return FLANN_INDEX_LSH;\n    }\n\n\n    void saveIndex(FILE* stream)\n    {\n        save_value(stream,table_number_);\n        save_value(stream,key_size_);\n        save_value(stream,multi_probe_level_);\n        save_value(stream, dataset_);\n    }\n\n    void loadIndex(FILE* stream)\n    {\n        load_value(stream, table_number_);\n        load_value(stream, key_size_);\n        load_value(stream, multi_probe_level_);\n        load_value(stream, dataset_);\n        // Building the index is so fast we can afford not storing it\n        buildIndex();\n\n        index_params_[\"algorithm\"] = getType();\n        index_params_[\"table_number\"] = table_number_;\n        index_params_[\"key_size\"] = key_size_;\n        index_params_[\"multi_probe_level\"] = multi_probe_level_;\n    }\n\n    /**\n     *  Returns size of index.\n     */\n    size_t size() const\n    {\n        return dataset_.rows;\n    }\n\n    /**\n     * Returns the length of an index feature.\n     */\n    size_t veclen() const\n    {\n        return feature_size_;\n    }\n\n    /**\n     * Computes the index memory usage\n     * Returns: memory used by the index\n     */\n    int usedMemory() const\n    {\n        return (int)(dataset_.rows * sizeof(int));\n    }\n\n\n    IndexParams getParameters() const\n    {\n        return index_params_;\n    }\n\n    /**\n     * \\brief Perform k-nearest neighbor search\n     * \\param[in] queries The query points for which to find the nearest neighbors\n     * \\param[out] indices The indices of the nearest neighbors found\n     * \\param[out] dists Distances to the nearest neighbors found\n     * \\param[in] knn Number of nearest neighbors to return\n     * \\param[in] params Search parameters\n     */\n    virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)\n    {\n        assert(queries.cols == veclen());\n        assert(indices.rows >= queries.rows);\n        assert(dists.rows >= queries.rows);\n        assert(int(indices.cols) >= knn);\n        assert(int(dists.cols) >= knn);\n\n\n        KNNUniqueResultSet<DistanceType> resultSet(knn);\n        for (size_t i = 0; i < queries.rows; i++) {\n            resultSet.clear();\n            std::fill_n(indices[i], knn, -1);\n            std::fill_n(dists[i], knn, std::numeric_limits<DistanceType>::max());\n            
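// slots left unfilled keep index -1 and max distance, signalling fewer than knn results\n            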
findNeighbors(resultSet, queries[i], params);\n            if (get_param(params,\"sorted\",true)) resultSet.sortAndCopy(indices[i], dists[i], knn);\n            else resultSet.copy(indices[i], dists[i], knn);\n        }\n    }\n\n\n    /**\n     * Find set of nearest neighbors to vec. Their indices are stored inside\n     * the result object.\n     *\n     * Params:\n     *     result = the result object in which the indices of the nearest-neighbors are stored\n     *     vec = the vector for which to search the nearest neighbors\n     */\n    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/)\n    {\n        getNeighbors(vec, result);\n    }\n\nprivate:\n    /** Defines the comparator on score and index\n     */\n    typedef std::pair<float, unsigned int> ScoreIndexPair;\n    struct SortScoreIndexPairOnSecond\n    {\n        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const\n        {\n            return left.second < right.second;\n        }\n    };\n\n    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH\n     * @param key the key we build neighbors from\n     * @param lowest_index the lowest index of the bit set\n     * @param level the multi-probe level we are at\n     * @param xor_masks all the xor masks\n     */\n    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level,\n                       std::vector<lsh::BucketKey>& xor_masks)\n    {\n        xor_masks.push_back(key);\n        if (level == 0) return;\n        for (int index = lowest_index - 1; index >= 0; --index) {\n            // Create a new key\n            lsh::BucketKey new_key = key | (1 << index);\n            fill_xor_mask(new_key, index, level - 1, xor_masks);\n        }\n    }\n\n    /** Performs the approximate nearest-neighbor search.\n     * @param vec the feature to analyze\n     * @param do_radius flag indicating if we check the radius too\n     * @param radius the radius if it is a radius search\n     * @param do_k flag indicating if we limit the number of nn\n     * @param k_nn the number of nearest neighbors\n     * @param checked_average used for debugging\n     */\n    void getNeighbors(const ElementType* vec, bool /*do_radius*/, float radius, bool do_k, unsigned int k_nn,\n                      float& /*checked_average*/)\n    {\n        // Not static: a static vector would keep stale candidates between calls\n        std::vector<ScoreIndexPair> score_index_heap;\n\n        if (do_k) {\n            unsigned int worst_score = std::numeric_limits<unsigned int>::max();\n            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();\n            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();\n            for (; table != table_end; ++table) {\n                size_t key = table->getKey(vec);\n                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();\n                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();\n                for (; xor_mask != xor_mask_end; ++xor_mask) {\n                    size_t sub_key = key ^ (*xor_mask);\n                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);\n                    if (bucket == 0) continue;\n\n                    // Go over each descriptor index\n                    std::vector<lsh::FeatureIndex>::const_iterator training_index = 
bucket->begin();\n                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();\n                    DistanceType hamming_distance;\n\n                    // Process the rest of the candidates\n                    for (; training_index < last_training_index; ++training_index) {\n                        hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols);\n\n                        if (hamming_distance < worst_score) {\n                            // Insert the new element (dereference the iterator to get the feature index)\n                            score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));\n                            std::push_heap(score_index_heap.begin(), score_index_heap.end());\n\n                            if (score_index_heap.size() > (unsigned int)k_nn) {\n                                // Remove the highest distance value as we have too many elements\n                                std::pop_heap(score_index_heap.begin(), score_index_heap.end());\n                                score_index_heap.pop_back();\n                                // Keep track of the worst score\n                                worst_score = score_index_heap.front().first;\n                            }\n                        }\n                    }\n                }\n            }\n        }\n        else {\n            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();\n            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();\n            for (; table != table_end; ++table) {\n                size_t key = table->getKey(vec);\n                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();\n                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();\n                for (; xor_mask != xor_mask_end; ++xor_mask) {\n                    size_t sub_key = key ^ (*xor_mask);\n                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);\n                    if (bucket == 0) continue;\n\n                    // Go over each descriptor index\n                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();\n                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();\n                    DistanceType hamming_distance;\n\n                    // Process the rest of the candidates\n                    for (; training_index < last_training_index; ++training_index) {\n                        // Compute the Hamming distance\n                        hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols);\n                        if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));\n                    }\n                }\n            }\n        }\n    }\n\n    /** Performs the approximate nearest-neighbor search.\n     * This is a slower version than the above as it uses the ResultSet\n     * @param vec the feature to analyze\n     */\n    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result)\n    {\n        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();\n        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();\n        for (; table != table_end; ++table) {\n            size_t key = table->getKey(vec);\n            
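// Multi-probe LSH: XOR-ing the table key with each precomputed mask\n            // also visits the neighboring buckets likely to hold close matches.\n            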
std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();\n            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();\n            for (; xor_mask != xor_mask_end; ++xor_mask) {\n                size_t sub_key = key ^ (*xor_mask);\n                const lsh::Bucket* bucket = table->getBucketFromKey((lsh::BucketKey)sub_key);\n                if (bucket == 0) continue;\n\n                // Go over each descriptor index\n                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();\n                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();\n                DistanceType hamming_distance;\n\n                // Process the rest of the candidates\n                for (; training_index < last_training_index; ++training_index) {\n                    // Compute the Hamming distance\n                    hamming_distance = distance_(vec, dataset_[*training_index], (int)dataset_.cols);\n                    result.addPoint(hamming_distance, *training_index);\n                }\n            }\n        }\n    }\n\n    /** The different hash tables */\n    std::vector<lsh::LshTable<ElementType> > tables_;\n\n    /** The data the LSH tables were built from */\n    Matrix<ElementType> dataset_;\n\n    /** The size of the features (as ElementType[]) */\n    unsigned int feature_size_;\n\n    IndexParams index_params_;\n\n    /** table number */\n    unsigned int table_number_;\n    /** key size */\n    unsigned int key_size_;\n    /** How far should we look for neighbors in multi-probe LSH */\n    unsigned int multi_probe_level_;\n\n    /** The XOR masks to apply to a key to get the neighboring buckets */\n    std::vector<lsh::BucketKey> xor_masks_;\n\n    Distance distance_;\n};\n}\n\n#endif //OPENCV_FLANN_LSH_INDEX_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/lsh_table.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n/***********************************************************************\n * Author: Vincent Rabaud\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_LSH_TABLE_H_\n#define OPENCV_FLANN_LSH_TABLE_H_\n\n#include <algorithm>\n#include <iostream>\n#include <iomanip>\n#include <limits.h>\n// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP\n#ifdef __GXX_EXPERIMENTAL_CXX0X__\n#  define USE_UNORDERED_MAP 1\n#else\n#  define USE_UNORDERED_MAP 0\n#endif\n#if USE_UNORDERED_MAP\n#include <unordered_map>\n#else\n#include <map>\n#endif\n#include <math.h>\n#include <stddef.h>\n\n#include \"dynamic_bitset.h\"\n#include \"matrix.h\"\n\nnamespace cvflann\n{\n\nnamespace lsh\n{\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/** What is stored in an LSH bucket\n */\ntypedef uint32_t FeatureIndex;\n/** The id from which we can get a bucket back in an LSH table\n */\ntypedef unsigned int BucketKey;\n\n/** A bucket in an LSH table\n */\ntypedef std::vector<FeatureIndex> Bucket;\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/** POD for stats about an LSH table\n */\nstruct LshStats\n{\n    std::vector<unsigned int> bucket_sizes_;\n    size_t n_buckets_;\n    size_t bucket_size_mean_;\n    size_t bucket_size_median_;\n    size_t bucket_size_min_;\n    size_t bucket_size_max_;\n    size_t bucket_size_std_dev;\n    /** Each contained vector contains three value: beginning/end for interval, number of elements in the bin\n     */\n    std::vector<std::vector<unsigned int> > size_histogram_;\n};\n\n/** Overload the << operator for LshStats\n * @param out the streams\n * @param stats the stats to display\n * @return the streams\n */\ninline std::ostream& 
operator <<(std::ostream& out, const LshStats& stats)\n{\n    int w = 20;\n    out << \"Lsh Table Stats:\\n\" << std::setw(w) << std::setiosflags(std::ios::right) << \"N buckets : \"\n    << stats.n_buckets_ << \"\\n\" << std::setw(w) << std::setiosflags(std::ios::right) << \"mean size : \"\n    << std::setiosflags(std::ios::left) << stats.bucket_size_mean_ << \"\\n\" << std::setw(w)\n    << std::setiosflags(std::ios::right) << \"median size : \" << stats.bucket_size_median_ << \"\\n\" << std::setw(w)\n    << std::setiosflags(std::ios::right) << \"min size : \" << std::setiosflags(std::ios::left)\n    << stats.bucket_size_min_ << \"\\n\" << std::setw(w) << std::setiosflags(std::ios::right) << \"max size : \"\n    << std::setiosflags(std::ios::left) << stats.bucket_size_max_;\n\n    // Display the histogram\n    out << std::endl << std::setw(w) << std::setiosflags(std::ios::right) << \"histogram : \"\n    << std::setiosflags(std::ios::left);\n    for (std::vector<std::vector<unsigned int> >::const_iterator iterator = stats.size_histogram_.begin(), end =\n             stats.size_histogram_.end(); iterator != end; ++iterator) out << (*iterator)[0] << \"-\" << (*iterator)[1] << \": \" << (*iterator)[2] << \",  \";\n\n    return out;\n}\n\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/** Lsh hash table. As its key is a sub-feature, and as usually\n * the size of it is pretty small, we keep it as a continuous memory array.\n * The value is an index in the corpus of features (we keep it as an unsigned\n * int for pure memory reasons, it could be a size_t)\n */\ntemplate<typename ElementType>\nclass LshTable\n{\npublic:\n    /** A container of all the feature indices. Optimized for space\n     */\n#if USE_UNORDERED_MAP\n    typedef std::unordered_map<BucketKey, Bucket> BucketsSpace;\n#else\n    typedef std::map<BucketKey, Bucket> BucketsSpace;\n#endif\n\n    /** A container of all the feature indices. 
Optimized for speed\n     */\n    typedef std::vector<Bucket> BucketsSpeed;\n\n    /** Default constructor\n     */\n    LshTable()\n    {\n    }\n\n    /** Constructor\n     * Creates the mask and allocates the memory\n     * @param feature_size is the size of the feature (considered as an ElementType[])\n     * @param key_size is the number of bits that are turned on in the feature\n     */\n    LshTable(unsigned int feature_size, unsigned int key_size)\n    {\n        (void)feature_size;\n        (void)key_size;\n        std::cerr << \"LSH is not implemented for that type\" << std::endl;\n        assert(0);\n    }\n\n    /** Add a feature to the table\n     * @param value the value to store for that feature\n     * @param feature the feature itself\n     */\n    void add(unsigned int value, const ElementType* feature)\n    {\n        // Add the value to the corresponding bucket\n        BucketKey key = (lsh::BucketKey)getKey(feature);\n\n        switch (speed_level_) {\n        case kArray:\n            // That means we get the buckets from an array\n            buckets_speed_[key].push_back(value);\n            break;\n        case kBitsetHash:\n            // That means we can check the bitset for the presence of a key\n            key_bitset_.set(key);\n            buckets_space_[key].push_back(value);\n            break;\n        case kHash:\n        {\n            // That means we have to check the hash table for the presence of a key\n            buckets_space_[key].push_back(value);\n            break;\n        }\n        }\n    }\n\n    /** Add a set of features to the table\n     * @param dataset the values to store\n     */\n    void add(Matrix<ElementType> dataset)\n    {\n#if USE_UNORDERED_MAP\n        buckets_space_.rehash((buckets_space_.size() + dataset.rows) * 1.2);\n#endif\n        // Add the features to the table\n        for (unsigned int i = 0; i < dataset.rows; ++i) add(i, dataset[i]);\n        // Now that the table is full, optimize it for speed/space\n        optimize();\n    }\n\n    /** Get a bucket given the key\n     * @param key\n     * @return\n     */\n    inline const Bucket* getBucketFromKey(BucketKey key) const\n    {\n        // Generate other buckets\n        switch (speed_level_) {\n        case kArray:\n            // That means we get the buckets from an array\n            return &buckets_speed_[key];\n            break;\n        case kBitsetHash:\n            // That means we can check the bitset for the presence of a key\n            if (key_bitset_.test(key)) return &buckets_space_.find(key)->second;\n            else return 0;\n            break;\n        case kHash:\n        {\n            // That means we have to check the hash table for the presence of a key\n            BucketsSpace::const_iterator bucket_it, bucket_end = buckets_space_.end();\n            bucket_it = buckets_space_.find(key);\n            // Stop here if that bucket does not exist\n            if (bucket_it == bucket_end) return 0;\n            else return &bucket_it->second;\n            break;\n        }\n        }\n        return 0;\n    }\n\n    /** Compute the sub-signature of a feature\n     */\n    size_t getKey(const ElementType* /*feature*/) const\n    {\n        std::cerr << \"LSH is not implemented for that type\" << std::endl;\n        assert(0);\n        return 1;\n    }\n\n    /** Get statistics about the table\n     * @return\n     */\n    LshStats getStats() const;\n\nprivate:\n    /** defines the speed of the implementation\n     * kArray uses 
a vector for storing data\n     * kBitsetHash uses a hash map but checks for the validity of a key with a bitset\n     * kHash uses a hash map only\n     */\n    enum SpeedLevel\n    {\n        kArray, kBitsetHash, kHash\n    };\n\n    /** Initialize some variables\n     */\n    void initialize(size_t key_size)\n    {\n        const size_t key_size_lower_bound = 1;\n        //a value (size_t(1) << key_size) must fit the size_t type so key_size has to be strictly less than size of size_t\n        const size_t key_size_upper_bound = std::min(sizeof(BucketKey) * CHAR_BIT + 1, sizeof(size_t) * CHAR_BIT);\n        if (key_size < key_size_lower_bound || key_size >= key_size_upper_bound)\n        {\n            CV_Error(cv::Error::StsBadArg, cv::format(\"Invalid key_size (=%d). Valid values for your system are %d <= key_size < %d.\", (int)key_size, (int)key_size_lower_bound, (int)key_size_upper_bound));\n        }\n\n        speed_level_ = kHash;\n        key_size_ = (unsigned)key_size;\n    }\n\n    /** Optimize the table for speed/space\n     */\n    void optimize()\n    {\n        // If we are already using the fast storage, no need to do anything\n        if (speed_level_ == kArray) return;\n\n        // Use an array if it will be more than half full\n        if (buckets_space_.size() > ((size_t(1) << key_size_) / 2)) {\n            speed_level_ = kArray;\n            // Fill the array version of it\n            buckets_speed_.resize(size_t(1) << key_size_);\n            for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) buckets_speed_[key_bucket->first] = key_bucket->second;\n\n            // Empty the hash table\n            buckets_space_.clear();\n            return;\n        }\n\n        // If the bitset is going to use less than 10% of the RAM of the hash map (at least 1 size_t for the key and two\n        // for the vector) or less than 512MB (key_size_ <= 32)\n        if (((std::max(buckets_space_.size(), buckets_speed_.size()) * CHAR_BIT * 3 * sizeof(BucketKey)) / 10\n             >= (size_t(1) << key_size_)) || (key_size_ <= 32)) {\n            speed_level_ = kBitsetHash;\n            key_bitset_.resize(size_t(1) << key_size_);\n            key_bitset_.reset();\n            // Try with the BucketsSpace\n            for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) key_bitset_.set(key_bucket->first);\n        }\n        else {\n            speed_level_ = kHash;\n            key_bitset_.clear();\n        }\n    }\n\n    /** The vector of all the buckets if they are held for speed\n     */\n    BucketsSpeed buckets_speed_;\n\n    /** The hash table of all the buckets in case we cannot use the speed version\n     */\n    BucketsSpace buckets_space_;\n\n    /** What is used to store the data */\n    SpeedLevel speed_level_;\n\n    /** If the subkey is small enough, it will keep track of which subkeys are set through that bitset\n     * That is just a speedup so that we don't look in the hash table (which can be much slower than checking a bitset)\n     */\n    DynamicBitset key_bitset_;\n\n    /** The size of the sub-signature in bits\n     */\n    unsigned int key_size_;\n\n    // Members only used for the unsigned char specialization\n    /** The mask to apply to a feature to get the hash key\n     * Only used in the unsigned char case\n     */\n    std::vector<size_t> 
mask_;\n};\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n// Specialization for unsigned char\n\ntemplate<>\ninline LshTable<unsigned char>::LshTable(unsigned int feature_size, unsigned int subsignature_size)\n{\n    initialize(subsignature_size);\n    // Allocate the mask\n    mask_ = std::vector<size_t>((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0);\n\n    // A bit brutal but fast to code\n    std::vector<size_t> indices(feature_size * CHAR_BIT);\n    for (size_t i = 0; i < feature_size * CHAR_BIT; ++i) indices[i] = i;\n    std::random_shuffle(indices.begin(), indices.end());\n\n    // Generate a random set of order of subsignature_size_ bits\n    for (unsigned int i = 0; i < key_size_; ++i) {\n        size_t index = indices[i];\n\n        // Set that bit in the mask\n        size_t divisor = CHAR_BIT * sizeof(size_t);\n        size_t idx = index / divisor; //pick the right size_t index\n        mask_[idx] |= size_t(1) << (index % divisor); //use modulo to find the bit offset\n    }\n\n    // Set to 1 if you want to display the mask for debug\n#if 0\n    {\n        size_t bcount = 0;\n        BOOST_FOREACH(size_t mask_block, mask_){\n            out << std::setw(sizeof(size_t) * CHAR_BIT / 4) << std::setfill('0') << std::hex << mask_block\n                << std::endl;\n            bcount += __builtin_popcountll(mask_block);\n        }\n        out << \"bit count : \" << std::dec << bcount << std::endl;\n        out << \"mask size : \" << mask_.size() << std::endl;\n        return out;\n    }\n#endif\n}\n\n/** Return the subsignature of a feature\n * @param feature the feature to analyze\n */\ntemplate<>\ninline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) const\n{\n    // no need to check if T is divisible by sizeof(size_t) like in the Hamming\n    // distance computation as we have a mask\n    const size_t* feature_block_ptr = reinterpret_cast<const size_t*> ((const void*)feature);\n\n    // Figure out the subsignature of the feature\n    // Given the feature ABCDEF, and the mask 001011, the output will be\n    // 000CEF\n    size_t subsignature = 0;\n    size_t bit_index = 1;\n\n    for (std::vector<size_t>::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) {\n        // get the mask and signature blocks\n        size_t feature_block = *feature_block_ptr;\n        size_t mask_block = *pmask_block;\n        while (mask_block) {\n            // Get the lowest set bit in the mask block\n            size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block);\n            // Add it to the current subsignature if necessary\n            subsignature += (feature_block & lowest_bit) ? 
bit_index : 0;\n            // Reset the bit in the mask block\n            mask_block ^= lowest_bit;\n            // increment the bit index for the subsignature\n            bit_index <<= 1;\n        }\n        // Check the next feature block\n        ++feature_block_ptr;\n    }\n    return subsignature;\n}\n\ntemplate<>\ninline LshStats LshTable<unsigned char>::getStats() const\n{\n    LshStats stats;\n    stats.bucket_size_mean_ = 0;\n    if ((buckets_speed_.empty()) && (buckets_space_.empty())) {\n        stats.n_buckets_ = 0;\n        stats.bucket_size_median_ = 0;\n        stats.bucket_size_min_ = 0;\n        stats.bucket_size_max_ = 0;\n        return stats;\n    }\n\n    if (!buckets_speed_.empty()) {\n        for (BucketsSpeed::const_iterator pbucket = buckets_speed_.begin(); pbucket != buckets_speed_.end(); ++pbucket) {\n            stats.bucket_sizes_.push_back((lsh::FeatureIndex)pbucket->size());\n            stats.bucket_size_mean_ += pbucket->size();\n        }\n        stats.bucket_size_mean_ /= buckets_speed_.size();\n        stats.n_buckets_ = buckets_speed_.size();\n    }\n    else {\n        for (BucketsSpace::const_iterator x = buckets_space_.begin(); x != buckets_space_.end(); ++x) {\n            stats.bucket_sizes_.push_back((lsh::FeatureIndex)x->second.size());\n            stats.bucket_size_mean_ += x->second.size();\n        }\n        stats.bucket_size_mean_ /= buckets_space_.size();\n        stats.n_buckets_ = buckets_space_.size();\n    }\n\n    std::sort(stats.bucket_sizes_.begin(), stats.bucket_sizes_.end());\n\n    //  BOOST_FOREACH(int size, stats.bucket_sizes_)\n    //          std::cout << size << \" \";\n    //  std::cout << std::endl;\n    stats.bucket_size_median_ = stats.bucket_sizes_[stats.bucket_sizes_.size() / 2];\n    stats.bucket_size_min_ = stats.bucket_sizes_.front();\n    stats.bucket_size_max_ = stats.bucket_sizes_.back();\n\n    // TODO compute mean and std\n    /*float mean, stddev;\n       stats.bucket_size_mean_ = mean;\n       stats.bucket_size_std_dev = stddev;*/\n\n    // Include a histogram of the buckets\n    unsigned int bin_start = 0;\n    unsigned int bin_end = 20;\n    bool is_new_bin = true;\n    for (std::vector<unsigned int>::iterator iterator = stats.bucket_sizes_.begin(), end = stats.bucket_sizes_.end(); iterator\n         != end; )\n        if (*iterator < bin_end) {\n            if (is_new_bin) {\n                stats.size_histogram_.push_back(std::vector<unsigned int>(3, 0));\n                stats.size_histogram_.back()[0] = bin_start;\n                stats.size_histogram_.back()[1] = bin_end - 1;\n                is_new_bin = false;\n            }\n            ++stats.size_histogram_.back()[2];\n            ++iterator;\n        }\n        else {\n            bin_start += 20;\n            bin_end += 20;\n            is_new_bin = true;\n        }\n\n    return stats;\n}\n\n// End the two namespaces\n}\n}\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n#endif /* OPENCV_FLANN_LSH_TABLE_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/matrix.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_DATASET_H_\n#define OPENCV_FLANN_DATASET_H_\n\n#include <stdio.h>\n\n#include \"general.h\"\n\nnamespace cvflann\n{\n\n/**\n * Class that implements a simple rectangular matrix stored in a memory buffer and\n * provides convenient matrix-like access using the [] operators.\n */\ntemplate <typename T>\nclass Matrix\n{\npublic:\n    typedef T type;\n\n    size_t rows;\n    size_t cols;\n    size_t stride;\n    T* data;\n\n    Matrix() : rows(0), cols(0), stride(0), data(NULL)\n    {\n    }\n\n    Matrix(T* data_, size_t rows_, size_t cols_, size_t stride_ = 0) :\n        rows(rows_), cols(cols_),  stride(stride_), data(data_)\n    {\n        if (stride==0) stride = cols;\n    }\n\n    /**\n     * Convenience function for deallocating the storage data.\n     */\n    FLANN_DEPRECATED void free()\n    {\n        fprintf(stderr, \"The cvflann::Matrix<T>::free() method is deprecated \"\n                \"and it does not do any memory deallocation any more.  You are\"\n                \"responsible for deallocating the matrix memory (by doing\"\n                \"'delete[] matrix.data' for example)\");\n    }\n\n    /**\n     * Operator that return a (pointer to a) row of the data.\n     */\n    T* operator[](size_t index) const\n    {\n        return data+index*stride;\n    }\n};\n\n\nclass UntypedMatrix\n{\npublic:\n    size_t rows;\n    size_t cols;\n    void* data;\n    flann_datatype_t type;\n\n    UntypedMatrix(void* data_, long rows_, long cols_) :\n        rows(rows_), cols(cols_), data(data_)\n    {\n    }\n\n    ~UntypedMatrix()\n    {\n    }\n\n\n    template<typename T>\n    Matrix<T> as()\n    {\n        return Matrix<T>((T*)data, rows, cols);\n    }\n};\n\n\n\n}\n\n#endif //OPENCV_FLANN_DATASET_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/miniflann.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef _OPENCV_MINIFLANN_HPP_\n#define _OPENCV_MINIFLANN_HPP_\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/flann/defines.h\"\n\nnamespace cv\n{\n\nnamespace flann\n{\n\nstruct CV_EXPORTS IndexParams\n{\n    IndexParams();\n    ~IndexParams();\n\n    String getString(const String& key, const String& defaultVal=String()) const;\n    int getInt(const String& key, int defaultVal=-1) const;\n    double getDouble(const String& key, double defaultVal=-1) const;\n\n    void setString(const String& key, const String& value);\n    void setInt(const String& key, int value);\n    void setDouble(const String& key, double value);\n    void setFloat(const String& key, float value);\n    void setBool(const String& key, bool value);\n    void setAlgorithm(int value);\n\n    void getAll(std::vector<String>& names,\n                std::vector<int>& types,\n                std::vector<String>& strValues,\n                std::vector<double>& numValues) const;\n\n    void* params;\n};\n\nstruct CV_EXPORTS KDTreeIndexParams : public IndexParams\n{\n    KDTreeIndexParams(int trees=4);\n};\n\nstruct CV_EXPORTS LinearIndexParams : public IndexParams\n{\n    LinearIndexParams();\n};\n\nstruct CV_EXPORTS CompositeIndexParams : public IndexParams\n{\n    CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11,\n   
                      cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f );\n};\n\nstruct CV_EXPORTS AutotunedIndexParams : public IndexParams\n{\n    AutotunedIndexParams(float target_precision = 0.8f, float build_weight = 0.01f,\n                         float memory_weight = 0, float sample_fraction = 0.1f);\n};\n\nstruct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams\n{\n    HierarchicalClusteringIndexParams(int branching = 32,\n                      cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, int trees = 4, int leaf_size = 100 );\n};\n\nstruct CV_EXPORTS KMeansIndexParams : public IndexParams\n{\n    KMeansIndexParams(int branching = 32, int iterations = 11,\n                      cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f );\n};\n\nstruct CV_EXPORTS LshIndexParams : public IndexParams\n{\n    LshIndexParams(int table_number, int key_size, int multi_probe_level);\n};\n\nstruct CV_EXPORTS SavedIndexParams : public IndexParams\n{\n    SavedIndexParams(const String& filename);\n};\n\nstruct CV_EXPORTS SearchParams : public IndexParams\n{\n    SearchParams( int checks = 32, float eps = 0, bool sorted = true );\n};\n\nclass CV_EXPORTS_W Index\n{\npublic:\n    CV_WRAP Index();\n    CV_WRAP Index(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2);\n    virtual ~Index();\n\n    CV_WRAP virtual void build(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2);\n    CV_WRAP virtual void knnSearch(InputArray query, OutputArray indices,\n                   OutputArray dists, int knn, const SearchParams& params=SearchParams());\n\n    CV_WRAP virtual int radiusSearch(InputArray query, OutputArray indices,\n                             OutputArray dists, double radius, int maxResults,\n                             const SearchParams& params=SearchParams());\n\n    CV_WRAP virtual void save(const String& filename) const;\n    CV_WRAP virtual bool load(InputArray features, const String& filename);\n    CV_WRAP virtual void release();\n    CV_WRAP cvflann::flann_distance_t getDistance() const;\n    CV_WRAP cvflann::flann_algorithm_t getAlgorithm() const;\n\nprotected:\n    cvflann::flann_distance_t distType;\n    cvflann::flann_algorithm_t algo;\n    int featureType;\n    void* index;\n};\n\n} } // namespace cv::flann\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/nn_index.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_NNINDEX_H\n#define OPENCV_FLANN_NNINDEX_H\n\n#include \"general.h\"\n#include \"matrix.h\"\n#include \"result_set.h\"\n#include \"params.h\"\n\nnamespace cvflann\n{\n\n/**\n * Nearest-neighbour index base class\n */\ntemplate <typename Distance>\nclass NNIndex\n{\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\npublic:\n\n    virtual ~NNIndex() {}\n\n    /**\n     * \\brief Builds the index\n     */\n    virtual void buildIndex() = 0;\n\n    /**\n     * \\brief Perform k-nearest neighbor search\n     * \\param[in] queries The query points for which to find the nearest neighbors\n     * \\param[out] indices The indices of the nearest neighbors found\n     * \\param[out] dists Distances to the nearest neighbors found\n     * \\param[in] knn Number of nearest neighbors to return\n     * \\param[in] params Search parameters\n     */\n    virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)\n    {\n        assert(queries.cols == veclen());\n        assert(indices.rows >= queries.rows);\n        assert(dists.rows >= queries.rows);\n        assert(int(indices.cols) >= knn);\n        assert(int(dists.cols) >= knn);\n\n#if 0\n        KNNResultSet<DistanceType> resultSet(knn);\n        for (size_t i = 0; i < queries.rows; i++) {\n            resultSet.init(indices[i], dists[i]);\n            findNeighbors(resultSet, queries[i], params);\n        }\n#else\n        KNNUniqueResultSet<DistanceType> resultSet(knn);\n        for (size_t i = 0; i < queries.rows; i++) {\n            resultSet.clear();\n            findNeighbors(resultSet, queries[i], params);\n            if (get_param(params,\"sorted\",true)) resultSet.sortAndCopy(indices[i], dists[i], knn);\n            else 
resultSet.copy(indices[i], dists[i], knn);\n        }\n#endif\n    }\n\n    /**\n     * \\brief Perform radius search\n     * \\param[in] query The query point\n     * \\param[out] indices The indices of the neighbors found within the given radius\n     * \\param[out] dists The distances to the nearest neighbors found\n     * \\param[in] radius The radius used for search\n     * \\param[in] params Search parameters\n     * \\returns Number of neighbors found\n     */\n    virtual int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)\n    {\n        if (query.rows != 1) {\n            fprintf(stderr, \"I can only search one feature at a time for range search\\n\");\n            return -1;\n        }\n        assert(query.cols == veclen());\n        assert(indices.cols == dists.cols);\n\n        int n = 0;\n        int* indices_ptr = NULL;\n        DistanceType* dists_ptr = NULL;\n        if (indices.cols > 0) {\n            n = (int)indices.cols;\n            indices_ptr = indices[0];\n            dists_ptr = dists[0];\n        }\n\n        RadiusUniqueResultSet<DistanceType> resultSet((DistanceType)radius);\n        resultSet.clear();\n        findNeighbors(resultSet, query[0], params);\n        if (n>0) {\n            if (get_param(params,\"sorted\",true)) resultSet.sortAndCopy(indices_ptr, dists_ptr, n);\n            else resultSet.copy(indices_ptr, dists_ptr, n);\n        }\n\n        return (int)resultSet.size();\n    }\n\n    /**\n     * \\brief Saves the index to a stream\n     * \\param stream The stream to save the index to\n     */\n    virtual void saveIndex(FILE* stream) = 0;\n\n    /**\n     * \\brief Loads the index from a stream\n     * \\param stream The stream from which the index is loaded\n     */\n    virtual void loadIndex(FILE* stream) = 0;\n\n    /**\n     * \\returns number of features in this index.\n     */\n    virtual size_t size() const = 0;\n\n    /**\n     * \\returns The dimensionality of the features in this index.\n     */\n    virtual size_t veclen() const = 0;\n\n    /**\n     * \\returns The amount of memory (in bytes) used by the index.\n     */\n    virtual int usedMemory() const = 0;\n\n    /**\n     * \\returns The index type (kdtree, kmeans,...)\n     */\n    virtual flann_algorithm_t getType() const = 0;\n\n    /**\n     * \\returns The index parameters\n     */\n    virtual IndexParams getParameters() const = 0;\n\n\n    /**\n     * \\brief Method that searches for nearest-neighbours\n     */\n    virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) = 0;\n};\n\n}\n\n#endif //OPENCV_FLANN_NNINDEX_H\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/object_factory.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_OBJECT_FACTORY_H_\n#define OPENCV_FLANN_OBJECT_FACTORY_H_\n\n#include <map>\n\nnamespace cvflann\n{\n\nclass CreatorNotFound\n{\n};\n\ntemplate<typename BaseClass,\n         typename UniqueIdType,\n         typename ObjectCreator = BaseClass* (*)()>\nclass ObjectFactory\n{\n    typedef ObjectFactory<BaseClass,UniqueIdType,ObjectCreator> ThisClass;\n    typedef std::map<UniqueIdType, ObjectCreator> ObjectRegistry;\n\n    // singleton class, private constructor\n    ObjectFactory() {}\n\npublic:\n\n    bool subscribe(UniqueIdType id, ObjectCreator creator)\n    {\n        if (object_registry.find(id) != object_registry.end()) return false;\n\n        object_registry[id] = creator;\n        return true;\n    }\n\n    bool unregister(UniqueIdType id)\n    {\n        return object_registry.erase(id) == 1;\n    }\n\n    ObjectCreator create(UniqueIdType id)\n    {\n        typename ObjectRegistry::const_iterator iter = object_registry.find(id);\n\n        if (iter == object_registry.end()) {\n            throw CreatorNotFound();\n        }\n\n        return iter->second;\n    }\n\n    static ThisClass& instance()\n    {\n        static ThisClass the_factory;\n        return the_factory;\n    }\nprivate:\n    ObjectRegistry object_registry;\n};\n\n}\n\n#endif /* OPENCV_FLANN_OBJECT_FACTORY_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/params.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2011  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2011  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n\n#ifndef OPENCV_FLANN_PARAMS_H_\n#define OPENCV_FLANN_PARAMS_H_\n\n#include \"any.h\"\n#include \"general.h\"\n#include <iostream>\n#include <map>\n\n\nnamespace cvflann\n{\n\ntypedef std::map<cv::String, any> IndexParams;\n\nstruct SearchParams : public IndexParams\n{\n    SearchParams(int checks = 32, float eps = 0, bool sorted = true )\n    {\n        // how many leafs to visit when searching for neighbours (-1 for unlimited)\n        (*this)[\"checks\"] = checks;\n        // search for eps-approximate neighbours (default: 0)\n        (*this)[\"eps\"] = eps;\n        // only for radius search, require neighbours sorted by distance (default: true)\n        (*this)[\"sorted\"] = sorted;\n    }\n};\n\n\ntemplate<typename T>\nT get_param(const IndexParams& params, cv::String name, const T& default_value)\n{\n    IndexParams::const_iterator it = params.find(name);\n    if (it != params.end()) {\n        return it->second.cast<T>();\n    }\n    else {\n        return default_value;\n    }\n}\n\ntemplate<typename T>\nT get_param(const IndexParams& params, cv::String name)\n{\n    IndexParams::const_iterator it = params.find(name);\n    if (it != params.end()) {\n        return it->second.cast<T>();\n    }\n    else {\n        throw FLANNException(cv::String(\"Missing parameter '\")+name+cv::String(\"' in the parameters given\"));\n    }\n}\n\ninline void print_params(const IndexParams& params, std::ostream& stream)\n{\n    IndexParams::const_iterator it;\n\n    for(it=params.begin(); it!=params.end(); ++it) {\n        stream << it->first << \" : \" << it->second << std::endl;\n    }\n}\n\ninline void print_params(const IndexParams& params)\n{\n    print_params(params, std::cout);\n}\n\n}\n\n\n#endif /* OPENCV_FLANN_PARAMS_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/random.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_RANDOM_H\n#define OPENCV_FLANN_RANDOM_H\n\n#include <algorithm>\n#include <cstdlib>\n#include <vector>\n\n#include \"general.h\"\n\nnamespace cvflann\n{\n\n/**\n * Seeds the random number generator\n *  @param seed Random seed\n */\ninline void seed_random(unsigned int seed)\n{\n    srand(seed);\n}\n\n/*\n * Generates a random double value.\n */\n/**\n * Generates a random double value.\n * @param high Upper limit\n * @param low Lower limit\n * @return Random double value\n */\ninline double rand_double(double high = 1.0, double low = 0)\n{\n    return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0)));\n}\n\n/**\n * Generates a random integer value.\n * @param high Upper limit\n * @param low Lower limit\n * @return Random integer value\n */\ninline int rand_int(int high = RAND_MAX, int low = 0)\n{\n    return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0)));\n}\n\n/**\n * Random number generator that returns a distinct number from\n * the [0,n) interval each time.\n */\nclass UniqueRandom\n{\n    std::vector<int> vals_;\n    int size_;\n    int counter_;\n\npublic:\n    /**\n     * Constructor.\n     * @param n Size of the interval from which to generate\n     * @return\n     */\n    UniqueRandom(int n)\n    {\n        init(n);\n    }\n\n    /**\n     * Initializes the number generator.\n     * @param n the size of the interval from which to generate random numbers.\n     */\n    void init(int n)\n    {\n        // create and initialize an array of size n\n        vals_.resize(n);\n        size_ = n;\n        for (int i = 0; i < size_; ++i) vals_[i] = i;\n\n        // shuffle the elements in the array\n        std::random_shuffle(vals_.begin(), vals_.end());\n\n        counter_ = 0;\n    }\n\n    /**\n     * Return a distinct random integer in greater or equal to 0 and less\n     * than 'n' on each call. 
It should be called maximum 'n' times.\n     * Returns: a random integer\n     */\n    int next()\n    {\n        if (counter_ == size_) {\n            return -1;\n        }\n        else {\n            return vals_[counter_++];\n        }\n    }\n};\n\n}\n\n#endif //OPENCV_FLANN_RANDOM_H\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/result_set.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_RESULTSET_H\n#define OPENCV_FLANN_RESULTSET_H\n\n#include <algorithm>\n#include <cstring>\n#include <iostream>\n#include <limits>\n#include <set>\n#include <vector>\n\nnamespace cvflann\n{\n\n/* This record represents a branch point when finding neighbors in\n    the tree.  It contains a record of the minimum distance to the query\n    point, as well as the node at which the search resumes.\n */\n\ntemplate <typename T, typename DistanceType>\nstruct BranchStruct\n{\n    T node;           /* Tree node at which search resumes */\n    DistanceType mindist;     /* Minimum distance to query for all nodes below. 
*/\n\n    BranchStruct() {}\n    BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {}\n\n    bool operator<(const BranchStruct<T, DistanceType>& rhs) const\n    {\n        return mindist<rhs.mindist;\n    }\n};\n\n\ntemplate <typename DistanceType>\nclass ResultSet\n{\npublic:\n    virtual ~ResultSet() {}\n\n    virtual bool full() const = 0;\n\n    virtual void addPoint(DistanceType dist, int index) = 0;\n\n    virtual DistanceType worstDist() const = 0;\n\n};\n\n/**\n * KNNSimpleResultSet does not ensure that the elements it holds are unique.\n * It is used in those cases where the nearest-neighbour algorithm used does not\n * attempt to insert the same element multiple times.\n */\ntemplate <typename DistanceType>\nclass KNNSimpleResultSet : public ResultSet<DistanceType>\n{\n    int* indices;\n    DistanceType* dists;\n    int capacity;\n    int count;\n    DistanceType worst_distance_;\n\npublic:\n    KNNSimpleResultSet(int capacity_) : capacity(capacity_), count(0)\n    {\n    }\n\n    void init(int* indices_, DistanceType* dists_)\n    {\n        indices = indices_;\n        dists = dists_;\n        count = 0;\n        worst_distance_ = (std::numeric_limits<DistanceType>::max)();\n        dists[capacity-1] = worst_distance_;\n    }\n\n    size_t size() const\n    {\n        return count;\n    }\n\n    bool full() const\n    {\n        return count == capacity;\n    }\n\n\n    void addPoint(DistanceType dist, int index)\n    {\n        if (dist >= worst_distance_) return;\n        int i;\n        for (i=count; i>0; --i) {\n#ifdef FLANN_FIRST_MATCH\n            if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) )\n#else\n            if (dists[i-1]>dist)\n#endif\n            {\n                if (i<capacity) {\n                    dists[i] = dists[i-1];\n                    indices[i] = indices[i-1];\n                }\n            }\n            else break;\n        }\n        if (count < capacity) ++count;\n        dists[i] = dist;\n        indices[i] = index;\n        worst_distance_ = dists[capacity-1];\n    }\n\n    DistanceType worstDist() const\n    {\n        return worst_distance_;\n    }\n};\n\n/**\n * K-Nearest neighbour result set. 
Ensures that the elements inserted are unique\n */\ntemplate <typename DistanceType>\nclass KNNResultSet : public ResultSet<DistanceType>\n{\n    int* indices;\n    DistanceType* dists;\n    int capacity;\n    int count;\n    DistanceType worst_distance_;\n\npublic:\n    KNNResultSet(int capacity_) : capacity(capacity_), count(0)\n    {\n    }\n\n    void init(int* indices_, DistanceType* dists_)\n    {\n        indices = indices_;\n        dists = dists_;\n        count = 0;\n        worst_distance_ = (std::numeric_limits<DistanceType>::max)();\n        dists[capacity-1] = worst_distance_;\n    }\n\n    size_t size() const\n    {\n        return count;\n    }\n\n    bool full() const\n    {\n        return count == capacity;\n    }\n\n\n    void addPoint(DistanceType dist, int index)\n    {\n        if (dist >= worst_distance_) return;\n        int i;\n        for (i = count; i > 0; --i) {\n#ifdef FLANN_FIRST_MATCH\n            if ( (dists[i-1]<=dist) && ((dist!=dists[i-1])||(indices[i-1]<=index)) )\n#else\n            if (dists[i-1]<=dist)\n#endif\n            {\n                // Check for duplicate indices\n                int j = i - 1;\n                while ((j >= 0) && (dists[j] == dist)) {\n                    if (indices[j] == index) {\n                        return;\n                    }\n                    --j;\n                }\n                break;\n            }\n        }\n\n        if (count < capacity) ++count;\n        for (int j = count-1; j > i; --j) {\n            dists[j] = dists[j-1];\n            indices[j] = indices[j-1];\n        }\n        dists[i] = dist;\n        indices[i] = index;\n        worst_distance_ = dists[capacity-1];\n    }\n\n    DistanceType worstDist() const\n    {\n        return worst_distance_;\n    }\n};\n\n\n/**\n * A result-set class used when performing a radius based search.\n */\ntemplate <typename DistanceType>\nclass RadiusResultSet : public ResultSet<DistanceType>\n{\n    DistanceType radius;\n    int* indices;\n    DistanceType* dists;\n    size_t capacity;\n    size_t count;\n\npublic:\n    RadiusResultSet(DistanceType radius_, int* indices_, DistanceType* dists_, int capacity_) :\n        radius(radius_), indices(indices_), dists(dists_), capacity(capacity_)\n    {\n        init();\n    }\n\n    ~RadiusResultSet()\n    {\n    }\n\n    void init()\n    {\n        count = 0;\n    }\n\n    size_t size() const\n    {\n        return count;\n    }\n\n    bool full() const\n    {\n        return true;\n    }\n\n    void addPoint(DistanceType dist, int index)\n    {\n        if (dist<radius) {\n            if ((capacity>0)&&(count < capacity)) {\n                dists[count] = dist;\n                indices[count] = index;\n            }\n            count++;\n        }\n    }\n\n    DistanceType worstDist() const\n    {\n        return radius;\n    }\n\n};\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/** Class that holds the k NN neighbors\n * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays\n */\ntemplate<typename DistanceType>\nclass UniqueResultSet : public ResultSet<DistanceType>\n{\npublic:\n    struct DistIndex\n    {\n        DistIndex(DistanceType dist, unsigned int index) :\n            dist_(dist), index_(index)\n        {\n        }\n        bool operator<(const DistIndex dist_index) const\n        {\n            return (dist_ < dist_index.dist_) || ((dist_ == dist_index.dist_) && index_ < 
dist_index.index_);\n        }\n        DistanceType dist_;\n        unsigned int index_;\n    };\n\n    /** Default cosntructor */\n    UniqueResultSet() :\n        worst_distance_(std::numeric_limits<DistanceType>::max())\n    {\n    }\n\n    /** Check the status of the set\n     * @return true if we have k NN\n     */\n    inline bool full() const\n    {\n        return is_full_;\n    }\n\n    /** Remove all elements in the set\n     */\n    virtual void clear() = 0;\n\n    /** Copy the set to two C arrays\n     * @param indices pointer to a C array of indices\n     * @param dist pointer to a C array of distances\n     * @param n_neighbors the number of neighbors to copy\n     */\n    virtual void copy(int* indices, DistanceType* dist, int n_neighbors = -1) const\n    {\n        if (n_neighbors < 0) {\n            for (typename std::set<DistIndex>::const_iterator dist_index = dist_indices_.begin(), dist_index_end =\n                     dist_indices_.end(); dist_index != dist_index_end; ++dist_index, ++indices, ++dist) {\n                *indices = dist_index->index_;\n                *dist = dist_index->dist_;\n            }\n        }\n        else {\n            int i = 0;\n            for (typename std::set<DistIndex>::const_iterator dist_index = dist_indices_.begin(), dist_index_end =\n                     dist_indices_.end(); (dist_index != dist_index_end) && (i < n_neighbors); ++dist_index, ++indices, ++dist, ++i) {\n                *indices = dist_index->index_;\n                *dist = dist_index->dist_;\n            }\n        }\n    }\n\n    /** Copy the set to two C arrays but sort it according to the distance first\n     * @param indices pointer to a C array of indices\n     * @param dist pointer to a C array of distances\n     * @param n_neighbors the number of neighbors to copy\n     */\n    virtual void sortAndCopy(int* indices, DistanceType* dist, int n_neighbors = -1) const\n    {\n        copy(indices, dist, n_neighbors);\n    }\n\n    /** The number of neighbors in the set\n     * @return\n     */\n    size_t size() const\n    {\n        return dist_indices_.size();\n    }\n\n    /** The distance of the furthest neighbor\n     * If we don't have enough neighbors, it returns the max possible value\n     * @return\n     */\n    inline DistanceType worstDist() const\n    {\n        return worst_distance_;\n    }\nprotected:\n    /** Flag to say if the set is full */\n    bool is_full_;\n\n    /** The worst distance found so far */\n    DistanceType worst_distance_;\n\n    /** The best candidates so far */\n    std::set<DistIndex> dist_indices_;\n};\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/** Class that holds the k NN neighbors\n * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays\n */\ntemplate<typename DistanceType>\nclass KNNUniqueResultSet : public UniqueResultSet<DistanceType>\n{\npublic:\n    /** Constructor\n     * @param capacity the number of neighbors to store at max\n     */\n    KNNUniqueResultSet(unsigned int capacity) : capacity_(capacity)\n    {\n        this->is_full_ = false;\n        this->clear();\n    }\n\n    /** Add a possible candidate to the best neighbors\n     * @param dist distance for that neighbor\n     * @param index index of that neighbor\n     */\n    inline void addPoint(DistanceType dist, int index)\n    {\n        // Don't do anything if we are worse than the worst\n        if (dist >= worst_distance_) return;\n        
dist_indices_.insert(DistIndex(dist, index));\n\n        if (is_full_) {\n            if (dist_indices_.size() > capacity_) {\n                dist_indices_.erase(*dist_indices_.rbegin());\n                worst_distance_ = dist_indices_.rbegin()->dist_;\n            }\n        }\n        else if (dist_indices_.size() == capacity_) {\n            is_full_ = true;\n            worst_distance_ = dist_indices_.rbegin()->dist_;\n        }\n    }\n\n    /** Remove all elements in the set\n     */\n    void clear()\n    {\n        dist_indices_.clear();\n        worst_distance_ = std::numeric_limits<DistanceType>::max();\n        is_full_ = false;\n    }\n\nprotected:\n    typedef typename UniqueResultSet<DistanceType>::DistIndex DistIndex;\n    using UniqueResultSet<DistanceType>::is_full_;\n    using UniqueResultSet<DistanceType>::worst_distance_;\n    using UniqueResultSet<DistanceType>::dist_indices_;\n\n    /** The number of neighbors to keep */\n    unsigned int capacity_;\n};\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/** Class that holds the radius nearest neighbors\n * It is more accurate than RadiusResultSet as it is not limited in the number of neighbors\n */\ntemplate<typename DistanceType>\nclass RadiusUniqueResultSet : public UniqueResultSet<DistanceType>\n{\npublic:\n    /** Constructor\n     * @param radius the maximum distance of a neighbor\n     */\n    RadiusUniqueResultSet(DistanceType radius) :\n        radius_(radius)\n    {\n        is_full_ = true;\n    }\n\n    /** Add a possible candidate to the best neighbors\n     * @param dist distance for that neighbor\n     * @param index index of that neighbor\n     */\n    void addPoint(DistanceType dist, int index)\n    {\n        if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index));\n    }\n\n    /** Remove all elements in the set\n     */\n    inline void clear()\n    {\n        dist_indices_.clear();\n    }\n\n\n    /** Check the status of the set\n     * @return always true\n     */\n    inline bool full() const\n    {\n        return true;\n    }\n\n    /** The distance of the furthest neighbor\n     * If we don't have enough neighbors, it returns the max possible value\n     * @return\n     */\n    inline DistanceType worstDist() const\n    {\n        return radius_;\n    }\nprivate:\n    typedef typename UniqueResultSet<DistanceType>::DistIndex DistIndex;\n    using UniqueResultSet<DistanceType>::dist_indices_;\n    using UniqueResultSet<DistanceType>::is_full_;\n\n    /** The furthest distance a neighbor can be */\n    DistanceType radius_;\n};\n\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n/** Class that holds the k NN neighbors within a radius distance\n */\ntemplate<typename DistanceType>\nclass KNNRadiusUniqueResultSet : public KNNUniqueResultSet<DistanceType>\n{\npublic:\n    /** Constructor\n     * @param capacity the number of neighbors to store at max\n     * @param radius the maximum distance of a neighbor\n     */\n    KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius)\n    {\n        this->capacity_ = capacity;\n        this->radius_ = radius;\n        this->clear();\n    }\n\n    /** Remove all elements in the set\n     */\n    void clear()\n    {\n        dist_indices_.clear();\n        worst_distance_ = radius_;\n        is_full_ = false;\n    }\nprivate:\n    using 
KNNUniqueResultSet<DistanceType>::dist_indices_;\n    using KNNUniqueResultSet<DistanceType>::is_full_;\n    using KNNUniqueResultSet<DistanceType>::worst_distance_;\n\n    /** The maximum number of neighbors to consider */\n    unsigned int capacity_;\n\n    /** The maximum distance of a neighbor */\n    DistanceType radius_;\n};\n}\n\n#endif //OPENCV_FLANN_RESULTSET_H\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/sampling.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n\n#ifndef OPENCV_FLANN_SAMPLING_H_\n#define OPENCV_FLANN_SAMPLING_H_\n\n#include \"matrix.h\"\n#include \"random.h\"\n\nnamespace cvflann\n{\n\ntemplate<typename T>\nMatrix<T> random_sample(Matrix<T>& srcMatrix, long size, bool remove = false)\n{\n    Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);\n\n    T* src,* dest;\n    for (long i=0; i<size; ++i) {\n        long r = rand_int((int)(srcMatrix.rows-i));\n        dest = newSet[i];\n        src = srcMatrix[r];\n        std::copy(src, src+srcMatrix.cols, dest);\n        if (remove) {\n            src = srcMatrix[srcMatrix.rows-i-1];\n            dest = srcMatrix[r];\n            std::copy(src, src+srcMatrix.cols, dest);\n        }\n    }\n    if (remove) {\n        srcMatrix.rows -= size;\n    }\n    return newSet;\n}\n\ntemplate<typename T>\nMatrix<T> random_sample(const Matrix<T>& srcMatrix, size_t size)\n{\n    UniqueRandom rand((int)srcMatrix.rows);\n    Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);\n\n    T* src,* dest;\n    for (size_t i=0; i<size; ++i) {\n        long r = rand.next();\n        dest = newSet[i];\n        src = srcMatrix[r];\n        std::copy(src, src+srcMatrix.cols, dest);\n    }\n    return newSet;\n}\n\n} // namespace\n\n\n#endif /* OPENCV_FLANN_SAMPLING_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/saving.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE NNIndexGOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_SAVING_H_\n#define OPENCV_FLANN_SAVING_H_\n\n#include <cstring>\n#include <vector>\n\n#include \"general.h\"\n#include \"nn_index.h\"\n\n#ifdef FLANN_SIGNATURE_\n#undef FLANN_SIGNATURE_\n#endif\n#define FLANN_SIGNATURE_ \"FLANN_INDEX\"\n\nnamespace cvflann\n{\n\ntemplate <typename T>\nstruct Datatype {};\ntemplate<>\nstruct Datatype<char> { static flann_datatype_t type() { return FLANN_INT8; } };\ntemplate<>\nstruct Datatype<short> { static flann_datatype_t type() { return FLANN_INT16; } };\ntemplate<>\nstruct Datatype<int> { static flann_datatype_t type() { return FLANN_INT32; } };\ntemplate<>\nstruct Datatype<unsigned char> { static flann_datatype_t type() { return FLANN_UINT8; } };\ntemplate<>\nstruct Datatype<unsigned short> { static flann_datatype_t type() { return FLANN_UINT16; } };\ntemplate<>\nstruct Datatype<unsigned int> { static flann_datatype_t type() { return FLANN_UINT32; } };\ntemplate<>\nstruct Datatype<float> { static flann_datatype_t type() { return FLANN_FLOAT32; } };\ntemplate<>\nstruct Datatype<double> { static flann_datatype_t type() { return FLANN_FLOAT64; } };\n\n\n/**\n * Structure representing the index header.\n */\nstruct IndexHeader\n{\n    char signature[16];\n    char version[16];\n    flann_datatype_t data_type;\n    flann_algorithm_t index_type;\n    size_t rows;\n    size_t cols;\n};\n\n/**\n * Saves index header to stream\n *\n * @param stream - Stream to save to\n * @param index - The index to save\n */\ntemplate<typename Distance>\nvoid save_header(FILE* stream, const NNIndex<Distance>& index)\n{\n    IndexHeader header;\n    memset(header.signature, 0, sizeof(header.signature));\n    strcpy(header.signature, FLANN_SIGNATURE_);\n    memset(header.version, 0, sizeof(header.version));\n    strcpy(header.version, FLANN_VERSION_);\n    header.data_type = Datatype<typename Distance::ElementType>::type();\n    
header.index_type = index.getType();\n    header.rows = index.size();\n    header.cols = index.veclen();\n\n    std::fwrite(&header, sizeof(header),1,stream);\n}\n\n\n/**\n *\n * @param stream - Stream to load from\n * @return Index header\n */\ninline IndexHeader load_header(FILE* stream)\n{\n    IndexHeader header;\n    size_t read_size = fread(&header,sizeof(header),1,stream);\n\n    if (read_size!=(size_t)1) {\n        throw FLANNException(\"Invalid index file, cannot read\");\n    }\n\n    if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) {\n        throw FLANNException(\"Invalid index file, wrong signature\");\n    }\n\n    return header;\n\n}\n\n\ntemplate<typename T>\nvoid save_value(FILE* stream, const T& value, size_t count = 1)\n{\n    fwrite(&value, sizeof(value),count, stream);\n}\n\ntemplate<typename T>\nvoid save_value(FILE* stream, const cvflann::Matrix<T>& value)\n{\n    fwrite(&value, sizeof(value),1, stream);\n    fwrite(value.data, sizeof(T),value.rows*value.cols, stream);\n}\n\ntemplate<typename T>\nvoid save_value(FILE* stream, const std::vector<T>& value)\n{\n    size_t size = value.size();\n    fwrite(&size, sizeof(size_t), 1, stream);\n    fwrite(&value[0], sizeof(T), size, stream);\n}\n\ntemplate<typename T>\nvoid load_value(FILE* stream, T& value, size_t count = 1)\n{\n    size_t read_cnt = fread(&value, sizeof(value), count, stream);\n    if (read_cnt != count) {\n        throw FLANNException(\"Cannot read from file\");\n    }\n}\n\ntemplate<typename T>\nvoid load_value(FILE* stream, cvflann::Matrix<T>& value)\n{\n    size_t read_cnt = fread(&value, sizeof(value), 1, stream);\n    if (read_cnt != 1) {\n        throw FLANNException(\"Cannot read from file\");\n    }\n    value.data = new T[value.rows*value.cols];\n    read_cnt = fread(value.data, sizeof(T), value.rows*value.cols, stream);\n    if (read_cnt != (size_t)(value.rows*value.cols)) {\n        throw FLANNException(\"Cannot read from file\");\n    }\n}\n\n\ntemplate<typename T>\nvoid load_value(FILE* stream, std::vector<T>& value)\n{\n    size_t size;\n    size_t read_cnt = fread(&size, sizeof(size_t), 1, stream);\n    if (read_cnt!=1) {\n        throw FLANNException(\"Cannot read from file\");\n    }\n    value.resize(size);\n    read_cnt = fread(&value[0], sizeof(T), size, stream);\n    if (read_cnt != size) {\n        throw FLANNException(\"Cannot read from file\");\n    }\n}\n\n}\n\n#endif /* OPENCV_FLANN_SAVING_H_ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/simplex_downhill.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_SIMPLEX_DOWNHILL_H_\n#define OPENCV_FLANN_SIMPLEX_DOWNHILL_H_\n\nnamespace cvflann\n{\n\n/**\n    Adds val to array vals (and point to array points) and keeping the arrays sorted by vals.\n */\ntemplate <typename T>\nvoid addValue(int pos, float val, float* vals, T* point, T* points, int n)\n{\n    vals[pos] = val;\n    for (int i=0; i<n; ++i) {\n        points[pos*n+i] = point[i];\n    }\n\n    // bubble down\n    int j=pos;\n    while (j>0 && vals[j]<vals[j-1]) {\n        swap(vals[j],vals[j-1]);\n        for (int i=0; i<n; ++i) {\n            swap(points[j*n+i],points[(j-1)*n+i]);\n        }\n        --j;\n    }\n}\n\n\n/**\n    Simplex downhill optimization function.\n    Preconditions: points is a 2D mattrix of size (n+1) x n\n                    func is the cost function taking n an array of n params and returning float\n                    vals is the cost function in the n+1 simplex points, if NULL it will be computed\n\n    Postcondition: returns optimum value and points[0..n] are the optimum parameters\n */\ntemplate <typename T, typename F>\nfloat optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )\n{\n    const int MAX_ITERATIONS = 10;\n\n    assert(n>0);\n\n    T* p_o = new T[n];\n    T* p_r = new T[n];\n    T* p_e = new T[n];\n\n    int alpha = 1;\n\n    int iterations = 0;\n\n    bool ownVals = false;\n    if (vals == NULL) {\n        ownVals = true;\n        vals = new float[n+1];\n        for (int i=0; i<n+1; ++i) {\n            float val = func(points+i*n);\n            addValue(i, val, vals, points+i*n, points, n);\n        }\n    }\n    int nn = n*n;\n\n    while (true) {\n\n        if (iterations++ > MAX_ITERATIONS) break;\n\n        // compute average of simplex points (except the highest point)\n        for (int j=0; j<n; ++j) {\n            p_o[j] = 0;\n            for (int i=0; i<n; 
++i) {\n                p_o[i] += points[j*n+i];\n            }\n        }\n        for (int i=0; i<n; ++i) {\n            p_o[i] /= n;\n        }\n\n        bool converged = true;\n        for (int i=0; i<n; ++i) {\n            if (p_o[i] != points[nn+i]) {\n                converged = false;\n            }\n        }\n        if (converged) break;\n\n        // trying a reflection\n        for (int i=0; i<n; ++i) {\n            p_r[i] = p_o[i] + alpha*(p_o[i]-points[nn+i]);\n        }\n        float val_r = func(p_r);\n\n        if ((val_r>=vals[0])&&(val_r<vals[n])) {\n            // reflection between second highest and lowest\n            // add it to the simplex\n            Logger::info(\"Choosing reflection\\n\");\n            addValue(n, val_r,vals, p_r, points, n);\n            continue;\n        }\n\n        if (val_r<vals[0]) {\n            // value is smaller than smalest in simplex\n\n            // expand some more to see if it drops further\n            for (int i=0; i<n; ++i) {\n                p_e[i] = 2*p_r[i]-p_o[i];\n            }\n            float val_e = func(p_e);\n\n            if (val_e<val_r) {\n                Logger::info(\"Choosing reflection and expansion\\n\");\n                addValue(n, val_e,vals,p_e,points,n);\n            }\n            else {\n                Logger::info(\"Choosing reflection\\n\");\n                addValue(n, val_r,vals,p_r,points,n);\n            }\n            continue;\n        }\n        if (val_r>=vals[n]) {\n            for (int i=0; i<n; ++i) {\n                p_e[i] = (p_o[i]+points[nn+i])/2;\n            }\n            float val_e = func(p_e);\n\n            if (val_e<vals[n]) {\n                Logger::info(\"Choosing contraction\\n\");\n                addValue(n,val_e,vals,p_e,points,n);\n                continue;\n            }\n        }\n        {\n            Logger::info(\"Full contraction\\n\");\n            for (int j=1; j<=n; ++j) {\n                for (int i=0; i<n; ++i) {\n                    points[j*n+i] = (points[j*n+i]+points[i])/2;\n                }\n                float val = func(points+j*n);\n                addValue(j,val,vals,points+j*n,points,n);\n            }\n        }\n    }\n\n    float bestVal = vals[0];\n\n    delete[] p_r;\n    delete[] p_o;\n    delete[] p_e;\n    if (ownVals) delete[] vals;\n\n    return bestVal;\n}\n\n}\n\n#endif //OPENCV_FLANN_SIMPLEX_DOWNHILL_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann/timer.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#ifndef OPENCV_FLANN_TIMER_H\n#define OPENCV_FLANN_TIMER_H\n\n#include <time.h>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/core/utility.hpp\"\n\nnamespace cvflann\n{\n\n/**\n * A start-stop timer class.\n *\n * Can be used to time portions of code.\n */\nclass StartStopTimer\n{\n    int64 startTime;\n\npublic:\n    /**\n     * Value of the timer.\n     */\n    double value;\n\n\n    /**\n     * Constructor.\n     */\n    StartStopTimer()\n    {\n        reset();\n    }\n\n    /**\n     * Starts the timer.\n     */\n    void start()\n    {\n        startTime = cv::getTickCount();\n    }\n\n    /**\n     * Stops the timer and updates timer value.\n     */\n    void stop()\n    {\n        int64 stopTime = cv::getTickCount();\n        value += ( (double)stopTime - startTime) / cv::getTickFrequency();\n    }\n\n    /**\n     * Resets the timer value to 0.\n     */\n    void reset()\n    {\n        value = 0;\n    }\n\n};\n\n}\n\n#endif // FLANN_TIMER_H\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/flann.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef _OPENCV_FLANN_HPP_\n#define _OPENCV_FLANN_HPP_\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/flann/miniflann.hpp\"\n#include \"opencv2/flann/flann_base.hpp\"\n\n/**\n@defgroup flann Clustering and Search in Multi-Dimensional Spaces\n\nThis section documents OpenCV's interface to the FLANN library. FLANN (Fast Library for Approximate\nNearest Neighbors) is a library that contains a collection of algorithms optimized for fast nearest\nneighbor search in large datasets and for high dimensional features. More information about FLANN\ncan be found in @cite Muja2009 .\n*/\n\nnamespace cvflann\n{\n    CV_EXPORTS flann_distance_t flann_distance_type();\n    FLANN_DEPRECATED CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order);\n}\n\n\nnamespace cv\n{\nnamespace flann\n{\n\n\n//! @addtogroup flann\n//! 
@{\n\ntemplate <typename T> struct CvType {};\ntemplate <> struct CvType<unsigned char> { static int type() { return CV_8U; } };\ntemplate <> struct CvType<char> { static int type() { return CV_8S; } };\ntemplate <> struct CvType<unsigned short> { static int type() { return CV_16U; } };\ntemplate <> struct CvType<short> { static int type() { return CV_16S; } };\ntemplate <> struct CvType<int> { static int type() { return CV_32S; } };\ntemplate <> struct CvType<float> { static int type() { return CV_32F; } };\ntemplate <> struct CvType<double> { static int type() { return CV_64F; } };\n\n\n// bring the flann parameters into this namespace\nusing ::cvflann::get_param;\nusing ::cvflann::print_params;\n\n// bring the flann distances into this namespace\nusing ::cvflann::L2_Simple;\nusing ::cvflann::L2;\nusing ::cvflann::L1;\nusing ::cvflann::MinkowskiDistance;\nusing ::cvflann::MaxDistance;\nusing ::cvflann::HammingLUT;\nusing ::cvflann::Hamming;\nusing ::cvflann::Hamming2;\nusing ::cvflann::HistIntersectionDistance;\nusing ::cvflann::HellingerDistance;\nusing ::cvflann::ChiSquareDistance;\nusing ::cvflann::KL_Divergence;\n\n\n/** @brief The FLANN nearest neighbor index class. This class is templated with the type of elements for which\nthe index is built.\n */\ntemplate <typename Distance>\nclass GenericIndex\n{\npublic:\n        typedef typename Distance::ElementType ElementType;\n        typedef typename Distance::ResultType DistanceType;\n\n        /** @brief Constructs a nearest neighbor search index for a given dataset.\n\n        @param features Matrix containing the features (points) to index. The size of the matrix is\n        num_features x feature_dimensionality and the data type of the elements in the matrix must\n        coincide with the type of the index.\n        @param params Structure containing the index parameters. The type of index that will be\n        constructed depends on the type of this parameter. See the description.\n        @param distance\n\n        The method constructs a fast search structure from a set of features using the specified algorithm\n        with specified parameters, as defined by params. params is a reference to one of the following\n        IndexParams descendant classes:\n\n        - **LinearIndexParams** When passing an object of this type, the index will perform a linear,\n        brute-force search. :\n        @code\n        struct LinearIndexParams : public IndexParams\n        {\n        };\n        @endcode\n        - **KDTreeIndexParams** When passing an object of this type the index constructed will consist of\n        a set of randomized kd-trees which will be searched in parallel. :\n        @code\n        struct KDTreeIndexParams : public IndexParams\n        {\n            KDTreeIndexParams( int trees = 4 );\n        };\n        @endcode\n        - **KMeansIndexParams** When passing an object of this type the index constructed will be a\n        hierarchical k-means tree. :\n        @code\n        struct KMeansIndexParams : public IndexParams\n        {\n            KMeansIndexParams(\n                int branching = 32,\n                int iterations = 11,\n                flann_centers_init_t centers_init = CENTERS_RANDOM,\n                float cb_index = 0.2 );\n        };\n        @endcode\n        - **CompositeIndexParams** When using a parameters object of this type the index created\n        combines the randomized kd-trees and the hierarchical k-means tree. 
:\n        @code\n        struct CompositeIndexParams : public IndexParams\n        {\n            CompositeIndexParams(\n                int trees = 4,\n                int branching = 32,\n                int iterations = 11,\n                flann_centers_init_t centers_init = CENTERS_RANDOM,\n                float cb_index = 0.2 );\n        };\n        @endcode\n        - **LshIndexParams** When using a parameters object of this type the index created uses\n        multi-probe LSH (by Multi-Probe LSH: Efficient Indexing for High-Dimensional Similarity Search\n        by Qin Lv, William Josephson, Zhe Wang, Moses Charikar, Kai Li., Proceedings of the 33rd\n        International Conference on Very Large Data Bases (VLDB). Vienna, Austria. September 2007) :\n        @code\n        struct LshIndexParams : public IndexParams\n        {\n            LshIndexParams(\n                unsigned int table_number,\n                unsigned int key_size,\n                unsigned int multi_probe_level );\n        };\n        @endcode\n        - **AutotunedIndexParams** When passing an object of this type the index created is\n        automatically tuned to offer the best performance, by choosing the optimal index type\n        (randomized kd-trees, hierarchical kmeans, linear) and parameters for the dataset provided. :\n        @code\n        struct AutotunedIndexParams : public IndexParams\n        {\n            AutotunedIndexParams(\n                float target_precision = 0.9,\n                float build_weight = 0.01,\n                float memory_weight = 0,\n                float sample_fraction = 0.1 );\n        };\n        @endcode\n        - **SavedIndexParams** This object type is used for loading a previously saved index from the\n        disk. :\n        @code\n        struct SavedIndexParams : public IndexParams\n        {\n            SavedIndexParams( String filename );\n        };\n        @endcode\n         */\n        GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance());\n\n        ~GenericIndex();\n\n        /** @brief Performs a K-nearest neighbor search for a given query point using the index.\n\n        @param query The query point\n        @param indices Vector that will contain the indices of the K-nearest neighbors found. It must have\n        at least knn size.\n        @param dists Vector that will contain the distances to the K-nearest neighbors found. 
It must have\n        at least knn size.\n        @param knn Number of nearest neighbors to search for.\n        @param params SearchParams\n         */\n        void knnSearch(const std::vector<ElementType>& query, std::vector<int>& indices,\n                       std::vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);\n        void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);\n\n        int radiusSearch(const std::vector<ElementType>& query, std::vector<int>& indices,\n                         std::vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& params);\n        int radiusSearch(const Mat& query, Mat& indices, Mat& dists,\n                         DistanceType radius, const ::cvflann::SearchParams& params);\n\n        void save(String filename) { nnIndex->save(filename); }\n\n        int veclen() const { return nnIndex->veclen(); }\n\n        int size() const { return nnIndex->size(); }\n\n        ::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); }\n\n        FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); }\n\nprivate:\n        ::cvflann::Index<Distance>* nnIndex;\n};\n\n//! @cond IGNORED\n\n#define FLANN_DISTANCE_CHECK \\\n    if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \\\n        printf(\"[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed \"\\\n        \"the distance using cvflann::set_distance_type. This is no longer working as expected \"\\\n        \"(cv::flann::Index always uses L2). You should create the index templated on the distance, \"\\\n        \"for example for L1 distance use: GenericIndex< L1<float> > \\n\"); \\\n    }\n\n\ntemplate <typename Distance>\nGenericIndex<Distance>::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance)\n{\n    CV_Assert(dataset.type() == CvType<ElementType>::type());\n    CV_Assert(dataset.isContinuous());\n    ::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);\n\n    nnIndex = new ::cvflann::Index<Distance>(m_dataset, params, distance);\n\n    FLANN_DISTANCE_CHECK\n\n    nnIndex->buildIndex();\n}\n\ntemplate <typename Distance>\nGenericIndex<Distance>::~GenericIndex()\n{\n    delete nnIndex;\n}\n\ntemplate <typename Distance>\nvoid GenericIndex<Distance>::knnSearch(const std::vector<ElementType>& query, std::vector<int>& indices, std::vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& searchParams)\n{\n    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());\n    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());\n    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());\n\n    FLANN_DISTANCE_CHECK\n\n    nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams);\n}\n\n\ntemplate <typename Distance>\nvoid GenericIndex<Distance>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams)\n{\n    CV_Assert(queries.type() == CvType<ElementType>::type());\n    CV_Assert(queries.isContinuous());\n    ::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);\n\n    CV_Assert(indices.type() == CV_32S);\n    CV_Assert(indices.isContinuous());\n    ::cvflann::Matrix<int> 
m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);\n\n    CV_Assert(dists.type() == CvType<DistanceType>::type());\n    CV_Assert(dists.isContinuous());\n    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);\n\n    FLANN_DISTANCE_CHECK\n\n    nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);\n}\n\ntemplate <typename Distance>\nint GenericIndex<Distance>::radiusSearch(const std::vector<ElementType>& query, std::vector<int>& indices, std::vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)\n{\n    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());\n    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());\n    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());\n\n    FLANN_DISTANCE_CHECK\n\n    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);\n}\n\ntemplate <typename Distance>\nint GenericIndex<Distance>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)\n{\n    CV_Assert(query.type() == CvType<ElementType>::type());\n    CV_Assert(query.isContinuous());\n    ::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);\n\n    CV_Assert(indices.type() == CV_32S);\n    CV_Assert(indices.isContinuous());\n    ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);\n\n    CV_Assert(dists.type() == CvType<DistanceType>::type());\n    CV_Assert(dists.isContinuous());\n    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);\n\n    FLANN_DISTANCE_CHECK\n\n    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);\n}\n\n//! 
@endcond\n\n/**\n * @deprecated Use GenericIndex class instead\n */\ntemplate <typename T>\nclass\n#ifndef _MSC_VER\n FLANN_DEPRECATED\n#endif\n Index_ {\npublic:\n        typedef typename L2<T>::ElementType ElementType;\n        typedef typename L2<T>::ResultType DistanceType;\n\n    Index_(const Mat& features, const ::cvflann::IndexParams& params);\n\n    ~Index_();\n\n    void knnSearch(const std::vector<ElementType>& query, std::vector<int>& indices, std::vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);\n    void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);\n\n    int radiusSearch(const std::vector<ElementType>& query, std::vector<int>& indices, std::vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& params);\n    int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& params);\n\n    void save(String filename)\n        {\n            if (nnIndex_L1) nnIndex_L1->save(filename);\n            if (nnIndex_L2) nnIndex_L2->save(filename);\n        }\n\n    int veclen() const\n    {\n            if (nnIndex_L1) return nnIndex_L1->veclen();\n            if (nnIndex_L2) return nnIndex_L2->veclen();\n        }\n\n    int size() const\n    {\n            if (nnIndex_L1) return nnIndex_L1->size();\n            if (nnIndex_L2) return nnIndex_L2->size();\n        }\n\n        ::cvflann::IndexParams getParameters()\n        {\n            if (nnIndex_L1) return nnIndex_L1->getParameters();\n            if (nnIndex_L2) return nnIndex_L2->getParameters();\n\n        }\n\n        FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters()\n        {\n            if (nnIndex_L1) return nnIndex_L1->getIndexParameters();\n            if (nnIndex_L2) return nnIndex_L2->getIndexParameters();\n        }\n\nprivate:\n        // providing backwards compatibility for L2 and L1 distances (most common)\n        ::cvflann::Index< L2<ElementType> >* nnIndex_L2;\n        ::cvflann::Index< L1<ElementType> >* nnIndex_L1;\n};\n\n#ifdef _MSC_VER\ntemplate <typename T>\nclass FLANN_DEPRECATED Index_;\n#endif\n\n//! @cond IGNORED\n\ntemplate <typename T>\nIndex_<T>::Index_(const Mat& dataset, const ::cvflann::IndexParams& params)\n{\n    printf(\"[WARNING] The cv::flann::Index_<T> class is deprecated, use cv::flann::GenericIndex<Distance> instead\\n\");\n\n    CV_Assert(dataset.type() == CvType<ElementType>::type());\n    CV_Assert(dataset.isContinuous());\n    ::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);\n\n    if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) {\n        nnIndex_L1 = NULL;\n        nnIndex_L2 = new ::cvflann::Index< L2<ElementType> >(m_dataset, params);\n    }\n    else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) {\n        nnIndex_L1 = new ::cvflann::Index< L1<ElementType> >(m_dataset, params);\n        nnIndex_L2 = NULL;\n    }\n    else {\n        printf(\"[ERROR] cv::flann::Index_<T> only provides backwards compatibility for the L1 and L2 distances. 
\"\n        \"For other distance types you must use cv::flann::GenericIndex<Distance>\\n\");\n        CV_Assert(0);\n    }\n    if (nnIndex_L1) nnIndex_L1->buildIndex();\n    if (nnIndex_L2) nnIndex_L2->buildIndex();\n}\n\ntemplate <typename T>\nIndex_<T>::~Index_()\n{\n    if (nnIndex_L1) delete nnIndex_L1;\n    if (nnIndex_L2) delete nnIndex_L2;\n}\n\ntemplate <typename T>\nvoid Index_<T>::knnSearch(const std::vector<ElementType>& query, std::vector<int>& indices, std::vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& searchParams)\n{\n    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());\n    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());\n    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());\n\n    if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams);\n    if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams);\n}\n\n\ntemplate <typename T>\nvoid Index_<T>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams)\n{\n    CV_Assert(queries.type() == CvType<ElementType>::type());\n    CV_Assert(queries.isContinuous());\n    ::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);\n\n    CV_Assert(indices.type() == CV_32S);\n    CV_Assert(indices.isContinuous());\n    ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);\n\n    CV_Assert(dists.type() == CvType<DistanceType>::type());\n    CV_Assert(dists.isContinuous());\n    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);\n\n    if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);\n    if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);\n}\n\ntemplate <typename T>\nint Index_<T>::radiusSearch(const std::vector<ElementType>& query, std::vector<int>& indices, std::vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)\n{\n    ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());\n    ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());\n    ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());\n\n    if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);\n    if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);\n}\n\ntemplate <typename T>\nint Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)\n{\n    CV_Assert(query.type() == CvType<ElementType>::type());\n    CV_Assert(query.isContinuous());\n    ::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);\n\n    CV_Assert(indices.type() == CV_32S);\n    CV_Assert(indices.isContinuous());\n    ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);\n\n    CV_Assert(dists.type() == CvType<DistanceType>::type());\n    CV_Assert(dists.isContinuous());\n    ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);\n\n    if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);\n    if (nnIndex_L2) return 
nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);\n}\n\n//! @endcond\n\n/** @brief Clusters features using hierarchical k-means algorithm.\n\n@param features The points to be clustered. The matrix must have elements of type\nDistance::ElementType.\n@param centers The centers of the clusters obtained. The matrix must have type\nDistance::ResultType. The number of rows in this matrix represents the number of clusters desired,\nhowever, because of the way the cut in the hierarchical tree is chosen, the number of clusters\ncomputed will be the highest number of the form (branching-1)\\*k+1 that's lower than the number of\nclusters desired, where branching is the tree's branching factor (see description of the\nKMeansIndexParams).\n@param params Parameters used in the construction of the hierarchical k-means tree.\n@param d Distance to be used for clustering.\n\nThe method clusters the given feature vectors by constructing a hierarchical k-means tree and\nchoosing a cut in the tree that minimizes the cluster's variance. It returns the number of clusters\nfound.\n */\ntemplate <typename Distance>\nint hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params,\n                           Distance d = Distance())\n{\n    typedef typename Distance::ElementType ElementType;\n    typedef typename Distance::ResultType DistanceType;\n\n    CV_Assert(features.type() == CvType<ElementType>::type());\n    CV_Assert(features.isContinuous());\n    ::cvflann::Matrix<ElementType> m_features((ElementType*)features.ptr<ElementType>(0), features.rows, features.cols);\n\n    CV_Assert(centers.type() == CvType<DistanceType>::type());\n    CV_Assert(centers.isContinuous());\n    ::cvflann::Matrix<DistanceType> m_centers((DistanceType*)centers.ptr<DistanceType>(0), centers.rows, centers.cols);\n\n    return ::cvflann::hierarchicalClustering<Distance>(m_features, m_centers, params, d);\n}\n\n/** @deprecated\n*/\ntemplate <typename ELEM_TYPE, typename DIST_TYPE>\nFLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params)\n{\n    printf(\"[WARNING] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> is deprecated, use \"\n        \"cv::flann::hierarchicalClustering<Distance> instead\\n\");\n\n    if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) {\n        return hierarchicalClustering< L2<ELEM_TYPE> >(features, centers, params);\n    }\n    else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) {\n        return hierarchicalClustering< L1<ELEM_TYPE> >(features, centers, params);\n    }\n    else {\n        printf(\"[ERROR] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> only provides backwards \"\n        \"compatibility for the L1 and L2 distances. \"\n        \"For other distance types you must use cv::flann::hierarchicalClustering<Distance>\\n\");\n        CV_Assert(0);\n    }\n}\n\n//! @} flann\n\n} } // namespace cv::flann\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/fuzzy/fuzzy_F0_math.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling,\n// Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_FUZZY_F0_MATH_H__\n#define __OPENCV_FUZZY_F0_MATH_H__\n\n#include \"opencv2/fuzzy/types.hpp\"\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n\nnamespace ft\n{\n    //! @addtogroup f0_math\n    //! @{\n\n    /** @brief Computes components of the array using direct F0-transform.\n    @param matrix Input 1-channel array.\n    @param kernel Kernel used for processing. Function **createKernel** can be used.\n    @param components Output 32-bit array for the components.\n    @param mask Mask can be used for unwanted area marking.\n\n    The function computes components using predefined kernel and mask.\n\n    @note\n        F-transform technique is described in paper @cite Perf:FT.\n     */\n    CV_EXPORTS void FT02D_components(InputArray matrix, InputArray kernel, OutputArray components, InputArray mask);\n\n    /** @brief Computes components of the array using direct F0-transform.\n    @param matrix Input 1-channel array.\n    @param kernel Kernel used for processing. 
Function **createKernel** can be used.\n    @param components Output 32-bit array for the components.\n\n    The function computes components using predefined kernel.\n\n    @note\n        F-transform technique is described in paper @cite Perf:FT.\n     */\n    CV_EXPORTS void FT02D_components(InputArray matrix, InputArray kernel, OutputArray components);\n\n    /** @brief Computes the inverse F0-transform.\n    @param components Input 32-bit array for the components.\n    @param kernel Kernel used for processing. Function **createKernel** can be used.\n    @param output Output 32-bit array.\n    @param width Width of the output array.\n    @param height Height of the output array.\n\n    @note\n        F-transform technique is described in paper @cite Perf:FT.\n     */\n    CV_EXPORTS void FT02D_inverseFT(InputArray components, InputArray kernel, OutputArray output, int width, int height);\n\n    /** @brief Computes the F0-transform and inverse F0-transform at once.\n    @param image Input image.\n    @param kernel Kernel used for processing. Function **createKernel** can be used.\n    @param output Output 32-bit array.\n    @param mask Mask used for unwanted area marking.\n\n    This function computes the F-transform and inverse F-transform in one step. It is fully sufficient and optimized for **Mat**.\n    */\n    CV_EXPORTS void FT02D_process(const Mat &image, const Mat &kernel, Mat &output, const Mat &mask);\n\n    /** @brief Computes the F0-transform and inverse F0-transform at once and returns the state.\n    @param image Input image.\n    @param kernel Kernel used for processing. Function **createKernel** can be used.\n    @param imageOutput Output 32-bit array.\n    @param mask Mask used for unwanted area marking.\n    @param maskOutput Mask after one iteration.\n    @param firstStop If **true**, the function returns -1 when the first problem appears. In case of **false**, the process is completed and the summation of all problems is returned.\n\n    This function computes one iteration of the F-transform and inverse F-transform and handles image and mask changes. The function is used in the *inpaint* function.\n    */\n    CV_EXPORTS int FT02D_iteration(const Mat &image, const Mat &kernel, Mat &imageOutput, const Mat &mask, Mat &maskOutput, bool firstStop = true);\n\n    //! @}\n}\n}\n\n#endif // __OPENCV_FUZZY_F0_MATH_H__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/fuzzy/fuzzy_image.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling,\n// Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_FUZZY_IMAGE_H__\n#define __OPENCV_FUZZY_IMAGE_H__\n\n#include \"types.hpp\"\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n\nnamespace ft\n{\n    //! @addtogroup f_image\n    //! 
@{\n\n    /** @brief Creates kernel from basic functions.\n    @param A Basic function used in axis **x**.\n    @param B Basic function used in axis **y**.\n    @param kernel Final 32-bit kernel derived from **A** and **B**.\n    @param chn Number of kernel channels.\n\n    The function creates a kernel usable for later fuzzy image processing.\n    */\n    CV_EXPORTS void createKernel(cv::InputArray A, cv::InputArray B, cv::OutputArray kernel, const int chn = 1);\n\n    /** @brief Creates kernel from general functions.\n    @param function Function type could be one of the following:\n        -   **LINEAR** Linear basic function.\n    @param radius Radius of the basic function.\n    @param kernel Final 32-bit kernel.\n    @param chn Number of kernel channels.\n\n    The function creates a kernel from predefined functions.\n    */\n    CV_EXPORTS void createKernel(int function, int radius, cv::OutputArray kernel, const int chn = 1);\n\n    /** @brief Image inpainting\n    @param image Input image.\n    @param mask Mask used for unwanted area marking.\n    @param output Output 32-bit image.\n    @param radius Radius of the basic function.\n    @param function Function type could be one of the following:\n        -   **LINEAR** Linear basic function.\n    @param algorithm Algorithm could be one of the following:\n        -   **ONE_STEP** One step algorithm.\n        -   **MULTI_STEP** Algorithm automatically increasing the radius of the basic function.\n        -   **ITERATIVE** Iterative algorithm running in several steps using partial computations.\n\n    This function provides an inpainting technique based on fuzzy mathematics.\n\n    @note\n        The algorithms are described in paper @cite Perf:rec.\n    */\n    CV_EXPORTS void inpaint(const cv::Mat &image, const cv::Mat &mask, cv::Mat &output, int radius = 2, int function = ft::LINEAR, int algorithm = ft::ONE_STEP);\n\n    /** @brief Image filtering\n    @param image Input image.\n    @param kernel Final 32-bit kernel.\n    @param output Output 32-bit image.\n\n    Filtering of the input image by means of the F-transform.\n    */\n    CV_EXPORTS void filter(const cv::Mat &image, const cv::Mat &kernel, cv::Mat &output);\n\n
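    /* Editor's note: a minimal inpainting sketch (illustrative only, not part of the\n       upstream header). File names are placeholders; the non-zero mask pixels are assumed\n       to mark the damaged area.\n\n       cv::Mat image = cv::imread(\"photo.png\");\n       cv::Mat mask  = cv::imread(\"mask.png\", cv::IMREAD_GRAYSCALE);\n       cv::Mat output;\n       cv::ft::inpaint(image, mask, output, 2, cv::ft::LINEAR, cv::ft::ONE_STEP);\n    */\n\n    //! @}\n}\n}\n\n#endif // __OPENCV_FUZZY_IMAGE_H__\n"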
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/fuzzy/types.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling,\n// Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_FUZZY_TYPES_H__\n#define __OPENCV_FUZZY_TYPES_H__\n\nnamespace cv\n{\n\nnamespace ft\n{\n    //! @addtogroup fuzzy\n    //! @{\n\n    enum\n    {\n        LINEAR = 1,\n        SINUS = 2\n    };\n\n    enum\n    {\n        ONE_STEP = 1,\n        MULTI_STEP = 2,\n        ITERATIVE = 3\n    };\n\n    //! @}\n}\n}\n\n#endif // __OPENCV_FUZZY_TYPES_H__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/fuzzy.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling,\n// Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_FUZZY_H__\n#define __OPENCV_FUZZY_H__\n\n#include \"opencv2/fuzzy/types.hpp\"\n#include \"opencv2/fuzzy/fuzzy_F0_math.hpp\"\n#include \"opencv2/fuzzy/fuzzy_image.hpp\"\n\n/**\n@defgroup fuzzy Image processing based on fuzzy mathematics\n\nNamespace for all functions is **ft**. The module brings implementation of the last image processing algorithms based on fuzzy mathematics.\n\n  @{\n    @defgroup f0_math Math with F0-transfrom support\n\nFuzzy transform (F-transform) of the 0th degree transform whole image to a vector of its components. These components are used in latter computation.\n\n    @defgroup f_image Fuzzy image processing\n\nImage proceesing based on F-transform is fast to process and easy to understand.\n   @}\n\n*/\n\n#endif // __OPENCV_FUZZY_H__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/highgui/highgui.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/highgui.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/highgui/highgui_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HIGHGUI_H__\n#define __OPENCV_HIGHGUI_H__\n\n#include \"opencv2/core/core_c.h\"\n#include \"opencv2/imgproc/imgproc_c.h\"\n#include \"opencv2/imgcodecs/imgcodecs_c.h\"\n#include \"opencv2/videoio/videoio_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/** @addtogroup highgui_c\n  @{\n  */\n\n/****************************************************************************************\\\n*                                  Basic GUI functions                                   *\n\\****************************************************************************************/\n//YV\n//-----------New for Qt\n/* For font */\nenum {  CV_FONT_LIGHT           = 25,//QFont::Light,\n        CV_FONT_NORMAL          = 50,//QFont::Normal,\n        CV_FONT_DEMIBOLD        = 63,//QFont::DemiBold,\n        CV_FONT_BOLD            = 75,//QFont::Bold,\n        CV_FONT_BLACK           = 87 //QFont::Black\n};\n\nenum {  CV_STYLE_NORMAL         = 0,//QFont::StyleNormal,\n        CV_STYLE_ITALIC         = 1,//QFont::StyleItalic,\n        CV_STYLE_OBLIQUE        = 2 //QFont::StyleOblique\n};\n/* ---------*/\n\n//for color cvScalar(blue_component, green_component, red_component[, alpha_component])\n//and alpha= 0 <-> 0xFF (not transparent <-> transparent)\nCVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color CV_DEFAULT(cvScalarAll(0)), int 
weight CV_DEFAULT(CV_FONT_NORMAL),  int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0));\n\nCVAPI(void) cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont *arg2);\n\nCVAPI(void) cvDisplayOverlay(const char* name, const char* text, int delayms CV_DEFAULT(0));\nCVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms CV_DEFAULT(0));\n\nCVAPI(void) cvSaveWindowParameters(const char* name);\nCVAPI(void) cvLoadWindowParameters(const char* name);\nCVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);\nCVAPI(void) cvStopLoop( void );\n\ntypedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata);\nenum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2};\nCVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCallback on_change CV_DEFAULT(NULL), void* userdata CV_DEFAULT(NULL) , int button_type CV_DEFAULT(CV_PUSH_BUTTON), int initial_button_state CV_DEFAULT(0));\n//----------------------\n\n\n/* this function is used to set some external parameters in case of X Window */\nCVAPI(int) cvInitSystem( int argc, char** argv );\n\nCVAPI(int) cvStartWindowThread( void );\n\n// ---------  YV ---------\nenum\n{\n    //These 3 flags are used by cvSet/GetWindowProperty\n    CV_WND_PROP_FULLSCREEN = 0, //to change/get window's fullscreen property\n    CV_WND_PROP_AUTOSIZE   = 1, //to change/get window's autosize property\n    CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspect ratio property\n    CV_WND_PROP_OPENGL     = 3, //to change/get window's opengl support\n\n    //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty\n    CV_WINDOW_NORMAL       = 0x00000000, //the user can resize the window (no constraint)  / also used to switch a fullscreen window to a normal size\n    CV_WINDOW_AUTOSIZE     = 0x00000001, //the user cannot resize the window, the size is constrained by the image displayed\n    CV_WINDOW_OPENGL       = 0x00001000, //window with opengl support\n\n    //Those flags are only for Qt\n    CV_GUI_EXPANDED         = 0x00000000, //status bar and tool bar\n    CV_GUI_NORMAL           = 0x00000010, //old-fashioned way\n\n    //These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty\n    CV_WINDOW_FULLSCREEN   = 1,//change the window to fullscreen\n    CV_WINDOW_FREERATIO    = 0x00000100,//the image expands as much as it can (no ratio constraint)\n    CV_WINDOW_KEEPRATIO    = 0x00000000//the ratio of the image is respected.\n};\n\n/* create window */\nCVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOSIZE) );\n\n/* Set and Get Property of the window */\nCVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value);\nCVAPI(double) cvGetWindowProperty(const char* name, int prop_id);\n\n/* display image within window (highgui windows remember their content) */\nCVAPI(void) cvShowImage( const char* name, const CvArr* image );\n\n/* resize/move window */\nCVAPI(void) cvResizeWindow( const char* name, int width, int height );\nCVAPI(void) cvMoveWindow( const char* name, int x, int y );\n\n\n/* destroy window and all the trackbars associated with it */\nCVAPI(void) cvDestroyWindow( const char* name );\n\nCVAPI(void) cvDestroyAllWindows(void);\n\n/* get native window handle (HWND in case of Win32 and Widget in case of X Window) */\nCVAPI(void*) cvGetWindowHandle( const char* name );\n\n/* get name of highgui window given its native handle */\nCVAPI(const char*) cvGetWindowName( void* window_handle );\n\n\ntypedef 
void (CV_CDECL *CvTrackbarCallback)(int pos);\n\n/* create trackbar and display it on top of given window, set callback */\nCVAPI(int) cvCreateTrackbar( const char* trackbar_name, const char* window_name,\n                             int* value, int count, CvTrackbarCallback on_change CV_DEFAULT(NULL));\n\ntypedef void (CV_CDECL *CvTrackbarCallback2)(int pos, void* userdata);\n\nCVAPI(int) cvCreateTrackbar2( const char* trackbar_name, const char* window_name,\n                              int* value, int count, CvTrackbarCallback2 on_change,\n                              void* userdata CV_DEFAULT(0));\n\n/* retrieve or set trackbar position */\nCVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name );\nCVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos );\nCVAPI(void) cvSetTrackbarMax(const char* trackbar_name, const char* window_name, int maxval);\nCVAPI(void) cvSetTrackbarMin(const char* trackbar_name, const char* window_name, int minval);\n\nenum\n{\n    CV_EVENT_MOUSEMOVE      =0,\n    CV_EVENT_LBUTTONDOWN    =1,\n    CV_EVENT_RBUTTONDOWN    =2,\n    CV_EVENT_MBUTTONDOWN    =3,\n    CV_EVENT_LBUTTONUP      =4,\n    CV_EVENT_RBUTTONUP      =5,\n    CV_EVENT_MBUTTONUP      =6,\n    CV_EVENT_LBUTTONDBLCLK  =7,\n    CV_EVENT_RBUTTONDBLCLK  =8,\n    CV_EVENT_MBUTTONDBLCLK  =9,\n    CV_EVENT_MOUSEWHEEL     =10,\n    CV_EVENT_MOUSEHWHEEL    =11\n};\n\nenum\n{\n    CV_EVENT_FLAG_LBUTTON   =1,\n    CV_EVENT_FLAG_RBUTTON   =2,\n    CV_EVENT_FLAG_MBUTTON   =4,\n    CV_EVENT_FLAG_CTRLKEY   =8,\n    CV_EVENT_FLAG_SHIFTKEY  =16,\n    CV_EVENT_FLAG_ALTKEY    =32\n};\n\n\n#define CV_GET_WHEEL_DELTA(flags) ((short)((flags >> 16) & 0xffff)) // upper 16 bits\n\ntypedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param);\n\n/* assign callback for mouse events */\nCVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse,\n                                void* param CV_DEFAULT(NULL));\n\n/* wait for key event infinitely (delay<=0) or for \"delay\" milliseconds */\nCVAPI(int) cvWaitKey(int delay CV_DEFAULT(0));\n\n// OpenGL support\n\ntypedef void (CV_CDECL *CvOpenGlDrawCallback)(void* userdata);\nCVAPI(void) cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallback callback, void* userdata CV_DEFAULT(NULL));\n\nCVAPI(void) cvSetOpenGlContext(const char* window_name);\nCVAPI(void) cvUpdateWindow(const char* window_name);\n\n\n/****************************************************************************************\\\n\n*                              Obsolete functions/synonyms                               *\n\\****************************************************************************************/\n\n#define cvAddSearchPath(path)\n#define cvvInitSystem cvInitSystem\n#define cvvNamedWindow cvNamedWindow\n#define cvvShowImage cvShowImage\n#define cvvResizeWindow cvResizeWindow\n#define cvvDestroyWindow cvDestroyWindow\n#define cvvCreateTrackbar cvCreateTrackbar\n#define cvvAddSearchPath cvAddSearchPath\n#define cvvWaitKey(name) cvWaitKey(0)\n#define cvvWaitKeyEx(name,delay) cvWaitKey(delay)\n#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE\n#define set_preprocess_func cvSetPreprocessFuncWin32\n#define set_postprocess_func cvSetPostprocessFuncWin32\n\n#if defined WIN32 || defined _WIN32\n\nCVAPI(void) cvSetPreprocessFuncWin32_(const void* callback);\nCVAPI(void) cvSetPostprocessFuncWin32_(const void* callback);\n#define cvSetPreprocessFuncWin32(callback) 
cvSetPreprocessFuncWin32_((const void*)(callback))\n#define cvSetPostprocessFuncWin32(callback) cvSetPostprocessFuncWin32_((const void*)(callback))\n\n#endif\n\n/** @} highgui_c */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/highgui.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HIGHGUI_HPP__\n#define __OPENCV_HIGHGUI_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgcodecs.hpp\"\n#include \"opencv2/videoio.hpp\"\n\n/**\n@defgroup highgui High-level GUI\n\nWhile OpenCV was designed for use in full-scale applications and can be used within functionally\nrich UI frameworks (such as Qt\\*, WinForms\\*, or Cocoa\\*) or without any UI at all, sometimes there\nit is required to try functionality quickly and visualize the results. This is what the HighGUI\nmodule has been designed for.\n\nIt provides easy interface to:\n\n-   Create and manipulate windows that can display images and \"remember\" their content (no need to\n    handle repaint events from OS).\n-   Add trackbars to the windows, handle simple mouse events as well as keyboard commands.\n\n@{\n    @defgroup highgui_opengl OpenGL support\n    @defgroup highgui_qt Qt New Functions\n\n    ![image](pics/qtgui.png)\n\n    This figure explains new functionality implemented with Qt\\* GUI. The new GUI provides a statusbar,\n    a toolbar, and a control panel. 
The control panel can have trackbars and buttonbars attached to it.\n    If you cannot see the control panel, press Ctrl+P or right-click any Qt window and select **Display\n    properties window**.\n\n    -   To attach a trackbar, the window name parameter must be NULL.\n\n    -   To attach a buttonbar, a button must be created. If the last bar attached to the control panel\n        is a buttonbar, the new button is added to the right of the last button. If the last bar\n        attached to the control panel is a trackbar, or the control panel is empty, a new buttonbar is\n        created. Then, a new button is attached to it.\n\n    See below the example used to generate the figure:\n    @code\n        int main(int argc, char *argv[])\n        {\n\n            int value = 50;\n            int value2 = 0;\n\n\n            namedWindow(\"main1\",WINDOW_NORMAL);\n            namedWindow(\"main2\",WINDOW_AUTOSIZE | CV_GUI_NORMAL);\n            createTrackbar( \"track1\", \"main1\", &value, 255,  NULL);\n\n            String nameb1 = \"button1\";\n            String nameb2 = \"button2\";\n\n            createButton(nameb1,callbackButton,&nameb1,QT_CHECKBOX,1);\n            createButton(nameb2,callbackButton,NULL,QT_CHECKBOX,0);\n            createTrackbar( \"track2\", NULL, &value2, 255, NULL);\n            createButton(\"button5\",callbackButton1,NULL,QT_RADIOBOX,0);\n            createButton(\"button6\",callbackButton2,NULL,QT_RADIOBOX,1);\n\n            setMouseCallback( \"main2\",on_mouse,NULL );\n\n            Mat img1 = imread(\"files/flower.jpg\");\n            VideoCapture video;\n            video.open(\"files/hockey.avi\");\n\n            Mat img2,img3;\n\n            while( waitKey(33) != 27 )\n            {\n                img1.convertTo(img2,-1,1,value);\n                video >> img3;\n\n                imshow(\"main1\",img2);\n                imshow(\"main2\",img3);\n            }\n\n            destroyAllWindows();\n\n            return 0;\n        }\n    @endcode\n\n\n    @defgroup highgui_winrt WinRT support\n\n    This figure explains new functionality implemented with WinRT GUI. The new GUI provides an Image control,\n    and a slider panel. Slider panel holds trackbars attached to it.\n\n    Sliders are attached below the image control. 
Every new slider is added below the previous one.\n\n    See below the example used to generate the figure:\n    @code\n        void sample_app::MainPage::ShowWindow()\n        {\n            static cv::String windowName(\"sample\");\n            cv::winrt_initContainer(this->cvContainer);\n            cv::namedWindow(windowName); // not required\n\n            cv::Mat image = cv::imread(\"Assets/sample.jpg\");\n            cv::Mat converted = cv::Mat(image.rows, image.cols, CV_8UC4);\n            cv::cvtColor(image, converted, COLOR_BGR2BGRA);\n            cv::imshow(windowName, converted); // this will create the window if it hasn't been created before\n\n            int state = 42;\n            cv::TrackbarCallback callback = [](int pos, void* userdata)\n            {\n                if (pos == 0) {\n                    cv::destroyWindow(windowName);\n                }\n            };\n            cv::TrackbarCallback callbackTwin = [](int pos, void* userdata)\n            {\n                if (pos >= 70) {\n                    cv::destroyAllWindows();\n                }\n            };\n            cv::createTrackbar(\"Sample trackbar\", windowName, &state, 100, callback);\n            cv::createTrackbar(\"Twin brother\", windowName, &state, 100, callbackTwin);\n        }\n    @endcode\n\n    @defgroup highgui_c C API\n@}\n*/\n\n///////////////////////// graphical user interface //////////////////////////\nnamespace cv\n{\n\n//! @addtogroup highgui\n//! @{\n\n//! Flags for cv::namedWindow\nenum WindowFlags {\n       WINDOW_NORMAL     = 0x00000000, //!< the user can resize the window (no constraint) / also used to switch a fullscreen window to a normal size.\n       WINDOW_AUTOSIZE   = 0x00000001, //!< the user cannot resize the window, the size is constrained by the image displayed.\n       WINDOW_OPENGL     = 0x00001000, //!< window with opengl support.\n\n       WINDOW_FULLSCREEN = 1,          //!< change the window to fullscreen.\n       WINDOW_FREERATIO  = 0x00000100, //!< the image expands as much as it can (no ratio constraint).\n       WINDOW_KEEPRATIO  = 0x00000000  //!< the ratio of the image is respected.\n     };\n\n//! Flags for cv::setWindowProperty / cv::getWindowProperty\nenum WindowPropertyFlags {\n       WND_PROP_FULLSCREEN   = 0, //!< fullscreen property    (can be WINDOW_NORMAL or WINDOW_FULLSCREEN).\n       WND_PROP_AUTOSIZE     = 1, //!< autosize property      (can be WINDOW_NORMAL or WINDOW_AUTOSIZE).\n       WND_PROP_ASPECT_RATIO = 2, //!< window's aspect ratio (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO).\n       WND_PROP_OPENGL       = 3  //!< opengl support.\n     };\n\n//! 
Mouse Events see cv::MouseCallback\nenum MouseEventTypes {\n       EVENT_MOUSEMOVE      = 0, //!< indicates that the mouse pointer has moved over the window.\n       EVENT_LBUTTONDOWN    = 1, //!< indicates that the left mouse button is pressed.\n       EVENT_RBUTTONDOWN    = 2, //!< indicates that the right mouse button is pressed.\n       EVENT_MBUTTONDOWN    = 3, //!< indicates that the middle mouse button is pressed.\n       EVENT_LBUTTONUP      = 4, //!< indicates that left mouse button is released.\n       EVENT_RBUTTONUP      = 5, //!< indicates that right mouse button is released.\n       EVENT_MBUTTONUP      = 6, //!< indicates that middle mouse button is released.\n       EVENT_LBUTTONDBLCLK  = 7, //!< indicates that left mouse button is double clicked.\n       EVENT_RBUTTONDBLCLK  = 8, //!< indicates that right mouse button is double clicked.\n       EVENT_MBUTTONDBLCLK  = 9, //!< indicates that middle mouse button is double clicked.\n       EVENT_MOUSEWHEEL     = 10,//!< positive and negative values mean forward and backward scrolling, respectively.\n       EVENT_MOUSEHWHEEL    = 11 //!< positive and negative values mean right and left scrolling, respectively.\n     };\n\n//! Mouse Event Flags see cv::MouseCallback\nenum MouseEventFlags {\n       EVENT_FLAG_LBUTTON   = 1, //!< indicates that the left mouse button is down.\n       EVENT_FLAG_RBUTTON   = 2, //!< indicates that the right mouse button is down.\n       EVENT_FLAG_MBUTTON   = 4, //!< indicates that the middle mouse button is down.\n       EVENT_FLAG_CTRLKEY   = 8, //!< indicates that CTRL Key is pressed.\n       EVENT_FLAG_SHIFTKEY  = 16,//!< indicates that SHIFT Key is pressed.\n       EVENT_FLAG_ALTKEY    = 32 //!< indicates that ALT Key is pressed.\n     };\n\n//! Qt font weight\nenum QtFontWeights {\n        QT_FONT_LIGHT           = 25, //!< Weight of 25\n        QT_FONT_NORMAL          = 50, //!< Weight of 50\n        QT_FONT_DEMIBOLD        = 63, //!< Weight of 63\n        QT_FONT_BOLD            = 75, //!< Weight of 75\n        QT_FONT_BLACK           = 87  //!< Weight of 87\n     };\n\n//! Qt font style\nenum QtFontStyles {\n        QT_STYLE_NORMAL         = 0, //!< Normal font.\n        QT_STYLE_ITALIC         = 1, //!< Italic font.\n        QT_STYLE_OBLIQUE        = 2  //!< Oblique font.\n     };\n\n//! Qt \"button\" type\nenum QtButtonTypes {\n       QT_PUSH_BUTTON = 0, //!< Push button.\n       QT_CHECKBOX    = 1, //!< Checkbox button.\n       QT_RADIOBOX    = 2  //!< Radiobox button.\n     };\n\n/** @brief Callback function for mouse events. see cv::setMouseCallback\n@param event one of the cv::MouseEventTypes constants.\n@param x The x-coordinate of the mouse event.\n@param y The y-coordinate of the mouse event.\n@param flags one of the cv::MouseEventFlags constants.\n@param userdata The optional parameter.\n */\ntypedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata);\n\n/** @brief Callback function for Trackbar see cv::createTrackbar\n@param pos current position of the specified trackbar.\n@param userdata The optional parameter.\n */\ntypedef void (*TrackbarCallback)(int pos, void* userdata);\n\n/** @brief Callback function defined to be called every frame. See cv::setOpenGlDrawCallback\n@param userdata The optional parameter.\n */\ntypedef void (*OpenGlDrawCallback)(void* userdata);\n\n/** @brief Callback function for a button created by cv::createButton\n@param state current state of the button. 
It could be -1 for a push button, 0 or 1 for a check/radio box button.\n@param userdata The optional parameter.\n */\ntypedef void (*ButtonCallback)(int state, void* userdata);\n\n/** @brief Creates a window.\n\nThe function namedWindow creates a window that can be used as a placeholder for images and\ntrackbars. Created windows are referred to by their names.\n\nIf a window with the same name already exists, the function does nothing.\n\nYou can call cv::destroyWindow or cv::destroyAllWindows to close the window and de-allocate any associated\nmemory usage. For a simple program, you do not really have to call these functions because all the\nresources and windows of the application are closed automatically by the operating system upon exit.\n\n@note\n\nQt backend supports additional flags:\n -   **WINDOW_NORMAL or WINDOW_AUTOSIZE:** WINDOW_NORMAL enables you to resize the\n     window, whereas WINDOW_AUTOSIZE adjusts automatically the window size to fit the\n     displayed image (see imshow ), and you cannot change the window size manually.\n -   **WINDOW_FREERATIO or WINDOW_KEEPRATIO:** WINDOW_FREERATIO adjusts the image\n     with no respect to its ratio, whereas WINDOW_KEEPRATIO keeps the image ratio.\n -   **CV_GUI_NORMAL or CV_GUI_EXPANDED:** CV_GUI_NORMAL is the old way to draw the window\n     without statusbar and toolbar, whereas CV_GUI_EXPANDED is a new enhanced GUI.\nBy default, flags == WINDOW_AUTOSIZE | WINDOW_KEEPRATIO | CV_GUI_EXPANDED\n\n@param winname Name of the window in the window caption that may be used as a window identifier.\n@param flags Flags of the window. The supported flags are: (cv::WindowFlags)\n */\nCV_EXPORTS_W void namedWindow(const String& winname, int flags = WINDOW_AUTOSIZE);\n\n/** @brief Destroys the specified window.\n\nThe function destroyWindow destroys the window with the given name.\n\n@param winname Name of the window to be destroyed.\n */\nCV_EXPORTS_W void destroyWindow(const String& winname);\n\n/** @brief Destroys all of the HighGUI windows.\n\nThe function destroyAllWindows destroys all of the opened HighGUI windows.\n */\nCV_EXPORTS_W void destroyAllWindows();\n\nCV_EXPORTS_W int startWindowThread();\n\n/** @brief Waits for a pressed key.\n\nThe function waitKey waits for a key event infinitely (when \\f$\\texttt{delay}\\leq 0\\f$ ) or for delay\nmilliseconds, when it is positive. Since the OS has a minimum time between switching threads, the\nfunction will not wait exactly delay ms, it will wait at least delay ms, depending on what else is\nrunning on your computer at that time. It returns the code of the pressed key or -1 if no key was\npressed before the specified time had elapsed.\n\n@note\n\nThis function is the only method in HighGUI that can fetch and handle events, so it needs to be\ncalled periodically for normal event processing unless HighGUI is used within an environment that\ntakes care of event processing.\n\n@note\n\nThe function only works if there is at least one HighGUI window created and the window is active.\nIf there are several HighGUI windows, any of them can be active.\n\n@param delay Delay in milliseconds. 0 is the special value that means \"forever\".\n */\nCV_EXPORTS_W int waitKey(int delay = 0);\n\n/** @brief Displays an image in the specified window.\n\nThe function imshow displays an image in the specified window. 
If the window was created with the\ncv::WINDOW_AUTOSIZE flag, the image is shown with its original size; however, it is still limited by the screen resolution.\nOtherwise, the image is scaled to fit the window. The function may scale the image, depending on its depth:\n\n-   If the image is 8-bit unsigned, it is displayed as is.\n-   If the image is 16-bit unsigned or 32-bit integer, the pixels are divided by 256. That is, the\n    value range [0,255\\*256] is mapped to [0,255].\n-   If the image is 32-bit floating-point, the pixel values are multiplied by 255. That is, the\n    value range [0,1] is mapped to [0,255].\n\nIf the window was created with OpenGL support, cv::imshow also supports ogl::Buffer, ogl::Texture2D and\ncuda::GpuMat as input.\n\nIf the window was not created before this function is called, a window with the cv::WINDOW_AUTOSIZE flag is created implicitly.\n\nIf you need to show an image that is bigger than the screen resolution, you will need to call namedWindow(\"\", WINDOW_NORMAL) before the imshow.\n\n@note This function should be followed by the cv::waitKey function, which displays the image for the specified\nmilliseconds. Otherwise, it won't display the image. For example, **waitKey(0)** will display the window\ninfinitely until any keypress (it is suitable for image display). **waitKey(25)** will display a frame\nfor 25 ms, after which the display will be automatically closed. (If you put it in a loop to read\nvideos, it will display the video frame-by-frame.)\n\n@note\n\n[__Windows Backend Only__] Pressing Ctrl+C will copy the image to the clipboard.\n\n[__Windows Backend Only__] Pressing Ctrl+S will show a dialog to save the image.\n\n@param winname Name of the window.\n@param mat Image to be shown.\n */\nCV_EXPORTS_W void imshow(const String& winname, InputArray mat);\n\n/** @brief Resizes window to the specified size\n\n@note\n\n-   The specified window size is for the image area. Toolbars are not counted.\n-   Only windows created without the cv::WINDOW_AUTOSIZE flag can be resized.\n\n@param winname Window name.\n@param width The new window width.\n@param height The new window height.\n */\nCV_EXPORTS_W void resizeWindow(const String& winname, int width, int height);\n\n/** @brief Moves window to the specified position\n\n@param winname Name of the window.\n@param x The new x-coordinate of the window.\n@param y The new y-coordinate of the window.\n */\nCV_EXPORTS_W void moveWindow(const String& winname, int x, int y);\n\n/** @brief Changes parameters of a window dynamically.\n\nThe function setWindowProperty enables changing properties of a window.\n\n@param winname Name of the window.\n@param prop_id Window property to edit. The supported operation flags are: (cv::WindowPropertyFlags)\n@param prop_value New value of the window property. The supported flags are: (cv::WindowFlags)\n */\nCV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value);\n\n/** @brief Updates window title\n@param winname Name of the window.\n@param title New title.\n*/\nCV_EXPORTS_W void setWindowTitle(const String& winname, const String& title);\n\n
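// Editor's note: a minimal sketch tying the window functions above together (illustrative\n// only, not part of the upstream header); the file name is a placeholder.\n//\n//     cv::Mat img = cv::imread(\"picture.png\");\n//     cv::namedWindow(\"preview\", cv::WINDOW_NORMAL); // resizable window\n//     cv::resizeWindow(\"preview\", 640, 480);\n//     cv::setWindowTitle(\"preview\", \"Preview window\");\n//     cv::imshow(\"preview\", img);\n//     cv::waitKey(0);                                // wait for a key press\n//     cv::destroyWindow(\"preview\");\n\n/** @brief Provides parameters of a window.\n\nThe function getWindowProperty returns properties of a window.\n\n@param winname Name of the window.\n@param prop_id Window property to retrieve. 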
The following operation flags are available: (cv::WindowPropertyFlags)\n\n@sa setWindowProperty\n */\nCV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id);\n\n/** @brief Sets mouse handler for the specified window\n\n@param winname Name of the window.\n@param onMouse Mouse callback. See OpenCV samples, such as\n<https://github.com/Itseez/opencv/tree/master/samples/cpp/ffilldemo.cpp>, on how to specify and\nuse the callback.\n@param userdata The optional parameter passed to the callback.\n */\nCV_EXPORTS void setMouseCallback(const String& winname, MouseCallback onMouse, void* userdata = 0);\n\n/** @brief Gets the mouse-wheel motion delta, when handling mouse-wheel events cv::EVENT_MOUSEWHEEL and\ncv::EVENT_MOUSEHWHEEL.\n\nFor regular mice with a scroll-wheel, delta will be a multiple of 120. The value 120 corresponds to\na one notch rotation of the wheel or the threshold for action to be taken and one such action should\noccur for each delta. Some high-precision mice with higher-resolution freely-rotating wheels may\ngenerate smaller values.\n\nFor cv::EVENT_MOUSEWHEEL positive and negative values mean forward and backward scrolling,\nrespectively. For cv::EVENT_MOUSEHWHEEL, where available, positive and negative values mean right and\nleft scrolling, respectively.\n\nWith the C API, the macro CV_GET_WHEEL_DELTA(flags) can be used alternatively.\n\n@note\n\nMouse-wheel events are currently supported only on Windows.\n\n@param flags The mouse callback flags parameter.\n */\nCV_EXPORTS int getMouseWheelDelta(int flags);\n\n/** @brief Creates a trackbar and attaches it to the specified window.\n\nThe function createTrackbar creates a trackbar (a slider or range control) with the specified name\nand range, assigns a variable value to be a position synchronized with the trackbar and specifies\nthe callback function onChange to be called on the trackbar position change. The created trackbar is\ndisplayed in the specified window winname.\n\n@note\n\n[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar should be attached to the\ncontrol panel.\n\nClicking the label of each trackbar enables editing the trackbar values manually.\n\n@param trackbarname Name of the created trackbar.\n@param winname Name of the window that will be used as a parent of the created trackbar.\n@param value Optional pointer to an integer variable whose value reflects the position of the\nslider. Upon creation, the slider position is defined by this variable.\n@param count Maximal position of the slider. The minimal position is always 0.\n@param onChange Pointer to the function to be called every time the slider changes position. This\nfunction should be prototyped as void Foo(int,void\\*); , where the first parameter is the trackbar\nposition and the second parameter is the user data (see the next parameter). If the callback is\nthe NULL pointer, no callbacks are called, but only value is updated.\n@param userdata User data that is passed as is to the callback. 
It can be used to handle trackbar\nevents without using global variables.\n */\nCV_EXPORTS int createTrackbar(const String& trackbarname, const String& winname,\n                              int* value, int count,\n                              TrackbarCallback onChange = 0,\n                              void* userdata = 0);\n\n/** @brief Returns the trackbar position.\n\nThe function returns the current position of the specified trackbar.\n\n@note\n\n[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control\npanel.\n\n@param trackbarname Name of the trackbar.\n@param winname Name of the window that is the parent of the trackbar.\n */\nCV_EXPORTS_W int getTrackbarPos(const String& trackbarname, const String& winname);\n\n/** @brief Sets the trackbar position.\n\nThe function sets the position of the specified trackbar in the specified window.\n\n@note\n\n[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control\npanel.\n\n@param trackbarname Name of the trackbar.\n@param winname Name of the window that is the parent of trackbar.\n@param pos New position.\n */\nCV_EXPORTS_W void setTrackbarPos(const String& trackbarname, const String& winname, int pos);\n\n/** @brief Sets the trackbar maximum position.\n\nThe function sets the maximum position of the specified trackbar in the specified window.\n\n@note\n\n[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control\npanel.\n\n@param trackbarname Name of the trackbar.\n@param winname Name of the window that is the parent of trackbar.\n@param maxval New maximum position.\n */\nCV_EXPORTS_W void setTrackbarMax(const String& trackbarname, const String& winname, int maxval);\n\n/** @brief Sets the trackbar minimum position.\n\nThe function sets the minimum position of the specified trackbar in the specified window.\n\n@note\n\n[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control\npanel.\n\n@param trackbarname Name of the trackbar.\n@param winname Name of the window that is the parent of trackbar.\n@param minval New minimum position.\n */\nCV_EXPORTS_W void setTrackbarMin(const String& trackbarname, const String& winname, int minval);\n\n
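// Editor's note: a minimal trackbar sketch (illustrative only, not part of the upstream\n// header); the window and trackbar names are hypothetical.\n//\n//     int level = 50;\n//     cv::namedWindow(\"demo\");\n//     cv::createTrackbar(\"level\", \"demo\", &level, 100,\n//         [](int pos, void*) { /* react to the new position */ });\n//     cv::setTrackbarMin(\"level\", \"demo\", 10); // positions below 10 cannot be selected\n\n//! @addtogroup highgui_opengl OpenGL support\n//! @{\n\n/** @brief Displays OpenGL 2D texture in the specified window.\n\n@param winname Name of the window.\n@param tex OpenGL 2D texture data.\n */\nCV_EXPORTS void imshow(const String& winname, const ogl::Texture2D& tex);\n\n/** @brief Sets a callback function to be called to draw on top of displayed image.\n\nThe function setOpenGlDrawCallback can be used to draw 3D data on the window. 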
See the example of\ncallback function below:\n@code\n    void on_opengl(void* param)\n    {\n        glLoadIdentity();\n\n        glTranslated(0.0, 0.0, -1.0);\n\n        glRotatef( 55, 1, 0, 0 );\n        glRotatef( 45, 0, 1, 0 );\n        glRotatef( 0, 0, 0, 1 );\n\n        static const int coords[6][4][3] = {\n            { { +1, -1, -1 }, { -1, -1, -1 }, { -1, +1, -1 }, { +1, +1, -1 } },\n            { { +1, +1, -1 }, { -1, +1, -1 }, { -1, +1, +1 }, { +1, +1, +1 } },\n            { { +1, -1, +1 }, { +1, -1, -1 }, { +1, +1, -1 }, { +1, +1, +1 } },\n            { { -1, -1, -1 }, { -1, -1, +1 }, { -1, +1, +1 }, { -1, +1, -1 } },\n            { { +1, -1, +1 }, { -1, -1, +1 }, { -1, -1, -1 }, { +1, -1, -1 } },\n            { { -1, -1, +1 }, { +1, -1, +1 }, { +1, +1, +1 }, { -1, +1, +1 } }\n        };\n\n        for (int i = 0; i < 6; ++i) {\n                    glColor3ub( i*20, 100+i*10, i*42 );\n                    glBegin(GL_QUADS);\n                    for (int j = 0; j < 4; ++j) {\n                            glVertex3d(0.2 * coords[i][j][0], 0.2 * coords[i][j][1], 0.2 * coords[i][j][2]);\n                    }\n                    glEnd();\n        }\n    }\n@endcode\n\n@param winname Name of the window.\n@param onOpenGlDraw Pointer to the function to be called every frame. This function should be\nprototyped as void Foo(void\\*) .\n@param userdata Pointer passed to the callback function.(__Optional__)\n */\nCV_EXPORTS void setOpenGlDrawCallback(const String& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0);\n\n/** @brief Sets the specified window as current OpenGL context.\n\n@param winname Name of the window.\n */\nCV_EXPORTS void setOpenGlContext(const String& winname);\n\n/** @brief Force window to redraw its context and call draw callback ( See cv::setOpenGlDrawCallback ).\n\n@param winname Name of the window.\n */\nCV_EXPORTS void updateWindow(const String& winname);\n\n//! @} highgui_opengl\n\n//! @addtogroup highgui_qt\n//! @{\n\n/** @brief QtFont available only for Qt. See cv::fontQt\n */\nstruct QtFont\n{\n    const char* nameFont;  //!< Name of the font\n    Scalar      color;     //!< Color of the font. Scalar(blue_component, green_component, red_component[, alpha_component])\n    int         font_face; //!< See cv::QtFontStyles\n    const int*  ascii;     //!< font data and metrics\n    const int*  greek;\n    const int*  cyrillic;\n    float       hscale, vscale;\n    float       shear;     //!< slope coefficient: 0 - normal, >0 - italic\n    int         thickness; //!< See cv::QtFontWeights\n    float       dx;        //!< horizontal interval between letters\n    int         line_type; //!< PointSize\n};\n\n/** @brief Creates the font to draw a text on an image.\n\nThe function fontQt creates a cv::QtFont object. This cv::QtFont is not compatible with putText .\n\nA basic usage of this function is the following: :\n@code\n    QtFont font = fontQt(\"Times\");\n    addText( img1, \"Hello World !\", Point(50,50), font);\n@endcode\n\n@param nameFont Name of the font. The name should match the name of a system font (such as\n*Times*). If the font is not found, a default one is used.\n@param pointSize Size of the font. If not specified, equal zero or negative, the point size of the\nfont is set to a system-dependent default value. Generally, this is 12 points.\n@param color Color of the font in BGRA where A = 255 is fully transparent. Use the macro CV_RGB\nfor simplicity.\n@param weight Font weight. 
Available operation flags are : cv::QtFontWeights You can also specify a positive integer for better control.\n@param style Font style. Available operation flags are : cv::QtFontStyles\n@param spacing Spacing between characters. It can be negative or positive.\n */\nCV_EXPORTS QtFont fontQt(const String& nameFont, int pointSize = -1,\n                         Scalar color = Scalar::all(0), int weight = QT_FONT_NORMAL,\n                         int style = QT_STYLE_NORMAL, int spacing = 0);\n\n/** @brief Draws a text on the image.\n\nThe function addText draws *text* on the image *img* using a specific font *font* (see example cv::fontQt\n)\n\n@param img 8-bit 3-channel image where the text should be drawn.\n@param text Text to write on an image.\n@param org Point(x,y) where the text should start on an image.\n@param font Font to use to draw a text.\n */\nCV_EXPORTS void addText( const Mat& img, const String& text, Point org, const QtFont& font);\n\n/** @brief Displays a text on a window image as an overlay for a specified duration.\n\nThe function displayOverlay displays useful information/tips on top of the window for a certain\namount of time *delayms*. The function does not modify the image, displayed in the window, that is,\nafter the specified delay the original content of the window is restored.\n\n@param winname Name of the window.\n@param text Overlay text to write on a window image.\n@param delayms The period (in milliseconds), during which the overlay text is displayed. If this\nfunction is called before the previous overlay text timed out, the timer is restarted and the text\nis updated. If this value is zero, the text never disappears.\n */\nCV_EXPORTS void displayOverlay(const String& winname, const String& text, int delayms = 0);\n\n/** @brief Displays a text on the window statusbar during the specified period of time.\n\nThe function displayStatusBar displays useful information/tips on top of the window for a certain\namount of time *delayms* . This information is displayed on the window statusbar (the window must be\ncreated with the CV_GUI_EXPANDED flags).\n\n@param winname Name of the window.\n@param text Text to write on the window statusbar.\n@param delayms Duration (in milliseconds) to display the text. If this function is called before\nthe previous text timed out, the timer is restarted and the text is updated. If this value is\nzero, the text never disappears.\n */\nCV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms = 0);\n\n/** @brief Saves parameters of the specified window.\n\nThe function saveWindowParameters saves size, location, flags, trackbars value, zoom and panning\nlocation of the window windowName.\n\n@param windowName Name of the window.\n */\nCV_EXPORTS void saveWindowParameters(const String& windowName);\n\n/** @brief Loads parameters of the specified window.\n\nThe function loadWindowParameters loads size, location, flags, trackbars value, zoom and panning\nlocation of the window windowName.\n\n@param windowName Name of the window.\n */\nCV_EXPORTS void loadWindowParameters(const String& windowName);\n\nCV_EXPORTS  int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);\n\nCV_EXPORTS  void stopLoop();\n\n/** @brief Attaches a button to the control panel.\n\nThe function createButton attaches a button to the control panel. Each button is added to a\nbuttonbar to the right of the last button. 
A new buttonbar is created if nothing was attached to the\ncontrol panel before, or if the last element attached to the control panel was a trackbar.\n\nSee below various examples of the cv::createButton function call:\n@code\n    createButton(NULL,callbackButton);//create a push button \"button 0\", that will call callbackButton.\n    createButton(\"button2\",callbackButton,NULL,QT_CHECKBOX,0);\n    createButton(\"button3\",callbackButton,&value);\n    createButton(\"button5\",callbackButton1,NULL,QT_RADIOBOX);\n    createButton(\"button6\",callbackButton2,NULL,QT_PUSH_BUTTON,1);\n@endcode\n\n@param  bar_name Name of the button.\n@param on_change Pointer to the function to be called every time the button changes its state.\nThis function should be prototyped as void Foo(int state, void\\*); . *state* is the current state\nof the button. It could be -1 for a push button, 0 or 1 for a check/radio box button.\n@param userdata Pointer passed to the callback function.\n@param type Optional type of the button. Available types are: (cv::QtButtonTypes)\n@param initial_button_state Default state of the button. Used for checkbox and radiobox. Its\nvalue could be 0 or 1. (__Optional__)\n*/\nCV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change,\n                             void* userdata = 0, int type = QT_PUSH_BUTTON,\n                             bool initial_button_state = false);\n\n//! @} highgui_qt\n\n//! @} highgui\n\n} // cv\n\n#ifndef DISABLE_OPENCV_24_COMPATIBILITY\n#include \"opencv2/highgui/highgui_c.h\"\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgcodecs/imgcodecs.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/imgcodecs.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgcodecs/imgcodecs_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_IMGCODECS_H__\n#define __OPENCV_IMGCODECS_H__\n\n#include \"opencv2/core/core_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/** @addtogroup imgcodecs_c\n  @{\n  */\n\nenum\n{\n/* 8bit, color or not */\n    CV_LOAD_IMAGE_UNCHANGED  =-1,\n/* 8bit, gray */\n    CV_LOAD_IMAGE_GRAYSCALE  =0,\n/* ?, color */\n    CV_LOAD_IMAGE_COLOR      =1,\n/* any depth, ? 
*/\n    CV_LOAD_IMAGE_ANYDEPTH   =2,\n/* ?, any color */\n    CV_LOAD_IMAGE_ANYCOLOR   =4\n};\n\n/* load image from file.\n  iscolor can be a combination of the above flags, where CV_LOAD_IMAGE_UNCHANGED\n  overrides the other flags.\n  Using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED.\n  Unless CV_LOAD_IMAGE_ANYDEPTH is specified, images are converted to 8-bit.\n*/\nCVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));\nCVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));\n\nenum\n{\n    CV_IMWRITE_JPEG_QUALITY =1,\n    CV_IMWRITE_JPEG_PROGRESSIVE =2,\n    CV_IMWRITE_JPEG_OPTIMIZE =3,\n    CV_IMWRITE_JPEG_RST_INTERVAL =4,\n    CV_IMWRITE_JPEG_LUMA_QUALITY =5,\n    CV_IMWRITE_JPEG_CHROMA_QUALITY =6,\n    CV_IMWRITE_PNG_COMPRESSION =16,\n    CV_IMWRITE_PNG_STRATEGY =17,\n    CV_IMWRITE_PNG_BILEVEL =18,\n    CV_IMWRITE_PNG_STRATEGY_DEFAULT =0,\n    CV_IMWRITE_PNG_STRATEGY_FILTERED =1,\n    CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,\n    CV_IMWRITE_PNG_STRATEGY_RLE =3,\n    CV_IMWRITE_PNG_STRATEGY_FIXED =4,\n    CV_IMWRITE_PXM_BINARY =32,\n    CV_IMWRITE_WEBP_QUALITY =64\n};\n\n/* save image to file */\nCVAPI(int) cvSaveImage( const char* filename, const CvArr* image,\n                        const int* params CV_DEFAULT(0) );\n\n/* decode image stored in the buffer */\nCVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));\nCVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));\n\n/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */\nCVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image,\n                             const int* params CV_DEFAULT(0) );\n\nenum\n{\n    CV_CVTIMG_FLIP      =1,\n    CV_CVTIMG_SWAP_RB   =2\n};\n\n/* utility function: convert one image to another with optional vertical flip */\nCVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0));\n\nCVAPI(int) cvHaveImageReader(const char* filename);\nCVAPI(int) cvHaveImageWriter(const char* filename);\n\n\n/****************************************************************************************\\\n*                              Obsolete functions/synonyms                               *\n\\****************************************************************************************/\n\n#define cvvLoadImage(name) cvLoadImage((name),1)\n#define cvvSaveImage cvSaveImage\n#define cvvConvertImage cvConvertImage\n\n/** @} imgcodecs_c */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif // __OPENCV_IMGCODECS_H__\n"
  },
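Since the C API above predates automatic memory management, a short sketch of the typical load/save round trip may help. "input.jpg" and "output.png" are placeholder paths, and the 0-terminated (id, value) params layout is an assumption consistent with the CV_DEFAULT(0) defaults above:
@code
// Hedged sketch of the legacy C image I/O: load, re-encode as PNG, release.
#include <opencv2/imgcodecs/imgcodecs_c.h>
#include <stdio.h>

int main(void)
{
    /* "input.jpg" is a placeholder path. */
    IplImage* img = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR);
    if (!img) { fprintf(stderr, "load failed\n"); return 1; }

    /* Parameters are (id, value) pairs; the list is assumed to end with 0. */
    int params[] = { CV_IMWRITE_PNG_COMPRESSION, 9, 0 };
    cvSaveImage("output.png", img, params);

    cvReleaseImage(&img);  /* the caller owns the IplImage */
    return 0;
}
@endcode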
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgcodecs/ios.h",
    "content": "\n/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#import <UIKit/UIKit.h>\n#import <Accelerate/Accelerate.h>\n#import <AVFoundation/AVFoundation.h>\n#import <ImageIO/ImageIO.h>\n#include \"opencv2/core/core.hpp\"\n\n//! @addtogroup imgcodecs_ios\n//! @{\n\nUIImage* MatToUIImage(const cv::Mat& image);\nvoid UIImageToMat(const UIImage* image,\n                         cv::Mat& m, bool alphaExist = false);\n\n//! @}\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgcodecs.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_IMGCODECS_HPP__\n#define __OPENCV_IMGCODECS_HPP__\n\n#include \"opencv2/core.hpp\"\n\n/**\n  @defgroup imgcodecs Image file reading and writing\n  @{\n    @defgroup imgcodecs_c C API\n    @defgroup imgcodecs_ios iOS glue\n  @}\n*/\n\n//////////////////////////////// image codec ////////////////////////////////\nnamespace cv\n{\n\n//! @addtogroup imgcodecs\n//! @{\n\n//! 
Imread flags\nenum ImreadModes {\n       IMREAD_UNCHANGED            = -1, //!< If set, return the loaded image as is (with alpha channel, otherwise it gets cropped).\n       IMREAD_GRAYSCALE            = 0,  //!< If set, always convert image to the single channel grayscale image.\n       IMREAD_COLOR                = 1,  //!< If set, always convert image to the 3 channel BGR color image.\n       IMREAD_ANYDEPTH             = 2,  //!< If set, return 16-bit/32-bit image when the input has the corresponding depth, otherwise convert it to 8-bit.\n       IMREAD_ANYCOLOR             = 4,  //!< If set, the image is read in any possible color format.\n       IMREAD_LOAD_GDAL            = 8,  //!< If set, use the GDAL driver for loading the image.\n       IMREAD_REDUCED_GRAYSCALE_2  = 16, //!< If set, always convert image to the single channel grayscale image and reduce the image size by 1/2.\n       IMREAD_REDUCED_COLOR_2      = 17, //!< If set, always convert image to the 3 channel BGR color image and reduce the image size by 1/2.\n       IMREAD_REDUCED_GRAYSCALE_4  = 32, //!< If set, always convert image to the single channel grayscale image and reduce the image size by 1/4.\n       IMREAD_REDUCED_COLOR_4      = 33, //!< If set, always convert image to the 3 channel BGR color image and reduce the image size by 1/4.\n       IMREAD_REDUCED_GRAYSCALE_8  = 64, //!< If set, always convert image to the single channel grayscale image and reduce the image size by 1/8.\n       IMREAD_REDUCED_COLOR_8      = 65  //!< If set, always convert image to the 3 channel BGR color image and reduce the image size by 1/8.\n     };\n\n//! Imwrite flags\nenum ImwriteFlags {\n       IMWRITE_JPEG_QUALITY        = 1,  //!< For JPEG, it can be a quality from 0 to 100 (the higher the better). Default value is 95.\n       IMWRITE_JPEG_PROGRESSIVE    = 2,  //!< Enable progressive JPEG format, 0 or 1, default is False.\n       IMWRITE_JPEG_OPTIMIZE       = 3,  //!< Enable optimized JPEG encoding, 0 or 1, default is False.\n       IMWRITE_JPEG_RST_INTERVAL   = 4,  //!< JPEG restart interval, 0 - 65535, default is 0 - no restart.\n       IMWRITE_JPEG_LUMA_QUALITY   = 5,  //!< Separate luma quality level, 0 - 100, default is 0 - don't use.\n       IMWRITE_JPEG_CHROMA_QUALITY = 6,  //!< Separate chroma quality level, 0 - 100, default is 0 - don't use.\n       IMWRITE_PNG_COMPRESSION     = 16, //!< For PNG, it can be the compression level from 0 to 9. A higher value means a smaller size and longer compression time. Default value is 3.\n       IMWRITE_PNG_STRATEGY        = 17, //!< One of cv::ImwritePNGFlags, default is IMWRITE_PNG_STRATEGY_DEFAULT.\n       IMWRITE_PNG_BILEVEL         = 18, //!< Binary level PNG, 0 or 1, default is 0.\n       IMWRITE_PXM_BINARY          = 32, //!< For PPM, PGM, or PBM, it can be a binary format flag, 0 or 1. Default value is 1.\n       IMWRITE_WEBP_QUALITY        = 64  //!< For WEBP, it can be a quality from 1 to 100 (the higher the better). By default (without any parameter) and for quality above 100, lossless compression is used.\n     };\n\n//! 
Imwrite PNG specific flags used to tune the compression algorithm.\n/** These flags modify the way PNG image compression is performed and are passed to the underlying zlib processing stage.\n\n-   The effect of IMWRITE_PNG_STRATEGY_FILTERED is to force more Huffman coding and less string matching; it is somewhat intermediate between IMWRITE_PNG_STRATEGY_DEFAULT and IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY.\n-   IMWRITE_PNG_STRATEGY_RLE is designed to be almost as fast as IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, but gives better compression for PNG image data.\n-   The strategy parameter only affects the compression ratio but not the correctness of the compressed output, even if it is not set appropriately.\n-   IMWRITE_PNG_STRATEGY_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications.\n*/\nenum ImwritePNGFlags {\n       IMWRITE_PNG_STRATEGY_DEFAULT      = 0, //!< Use this value for normal data.\n       IMWRITE_PNG_STRATEGY_FILTERED     = 1, //!< Use this value for data produced by a filter (or predictor). Filtered data consists mostly of small values with a somewhat random distribution. In this case, the compression algorithm is tuned to compress them better.\n       IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, //!< Use this value to force Huffman encoding only (no string match).\n       IMWRITE_PNG_STRATEGY_RLE          = 3, //!< Use this value to limit match distances to one (run-length encoding).\n       IMWRITE_PNG_STRATEGY_FIXED        = 4  //!< Using this value prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications.\n     };\n\n/** @brief Loads an image from a file.\n\n@anchor imread\n\nThe function imread loads an image from the specified file and returns it. If the image cannot be\nread (because of a missing file, improper permissions, or an unsupported or invalid format), the function\nreturns an empty matrix ( Mat::data==NULL ).\n\nCurrently, the following file formats are supported:\n\n-   Windows bitmaps - \\*.bmp, \\*.dib (always supported)\n-   JPEG files - \\*.jpeg, \\*.jpg, \\*.jpe (see the *Notes* section)\n-   JPEG 2000 files - \\*.jp2 (see the *Notes* section)\n-   Portable Network Graphics - \\*.png (see the *Notes* section)\n-   WebP - \\*.webp (see the *Notes* section)\n-   Portable image format - \\*.pbm, \\*.pgm, \\*.ppm, \\*.pxm, \\*.pnm (always supported)\n-   Sun rasters - \\*.sr, \\*.ras (always supported)\n-   TIFF files - \\*.tiff, \\*.tif (see the *Notes* section)\n-   OpenEXR Image files - \\*.exr (see the *Notes* section)\n-   Radiance HDR - \\*.hdr, \\*.pic (always supported)\n-   Raster and Vector geospatial data supported by GDAL (see the *Notes* section)\n\n@note\n\n-   The function determines the type of an image by the content, not by the file extension.\n-   In the case of color images, the decoded images will have the channels stored in **B G R** order.\n-   On Microsoft Windows\\* OS and MacOSX\\*, the codecs shipped with an OpenCV image (libjpeg,\n    libpng, libtiff, and libjasper) are used by default. So, OpenCV can always read JPEGs, PNGs,\n    and TIFFs. On MacOSX, there is also an option to use native MacOSX image readers. But beware\n    that currently these native image loaders give images with different pixel values because of\n    the color management embedded into MacOSX.\n-   On Linux\\*, BSD flavors and other Unix-like open-source operating systems, OpenCV looks for\n    codecs supplied with an OS image. 
Install the relevant packages (do not forget the development\n    files, for example, \"libjpeg-dev\", in Debian\\* and Ubuntu\\*) to get the codec support, or turn\n    on the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake.\n-   If you set the *WITH_GDAL* flag to true in CMake and use @ref IMREAD_LOAD_GDAL to load the image,\n    the [GDAL](http://www.gdal.org) driver will be used to decode the image. It supports\n    the following formats: [Raster](http://www.gdal.org/formats_list.html),\n    [Vector](http://www.gdal.org/ogr_formats.html).\n@param filename Name of file to be loaded.\n@param flags Flag that can take values of cv::ImreadModes.\n*/\nCV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );\n\n/** @brief Loads a multi-page image from a file.\n\nThe function imreadmulti loads a multi-page image from the specified file into a vector of Mat objects.\n@param filename Name of file to be loaded.\n@param flags Flag that can take values of cv::ImreadModes; defaults to cv::IMREAD_ANYCOLOR.\n@param mats A vector of Mat objects holding each page, if more than one.\n@sa cv::imread\n*/\nCV_EXPORTS_W bool imreadmulti(const String& filename, std::vector<Mat>& mats, int flags = IMREAD_ANYCOLOR);\n\n/** @brief Saves an image to a specified file.\n\nThe function imwrite saves the image to the specified file. The image format is chosen based on the\nfilename extension (see cv::imread for the list of extensions). Only 8-bit (or 16-bit unsigned (CV_16U)\nin case of PNG, JPEG 2000, and TIFF) single-channel or 3-channel (with 'BGR' channel order) images\ncan be saved using this function. If the format, depth or channel order is different, use\nMat::convertTo and cv::cvtColor to convert it before saving. Or, use the universal FileStorage I/O\nfunctions to save the image to XML or YAML format.\n\nIt is possible to store PNG images with an alpha channel using this function. To do this, create an\n8-bit (or 16-bit) 4-channel BGRA image, where the alpha channel goes last. Fully transparent pixels\nshould have alpha set to 0, fully opaque pixels should have alpha set to 255/65535.\n\nThe sample below shows how to create such a BGRA image and store it to a PNG file. 
It also demonstrates how to set custom\ncompression parameters:\n@code\n    #include <opencv2/opencv.hpp>\n\n    using namespace cv;\n    using namespace std;\n\n    void createAlphaMat(Mat &mat)\n    {\n        CV_Assert(mat.channels() == 4);\n        for (int i = 0; i < mat.rows; ++i) {\n            for (int j = 0; j < mat.cols; ++j) {\n                Vec4b& bgra = mat.at<Vec4b>(i, j);\n                bgra[0] = UCHAR_MAX; // Blue\n                bgra[1] = saturate_cast<uchar>((float (mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX); // Green\n                bgra[2] = saturate_cast<uchar>((float (mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX); // Red\n                bgra[3] = saturate_cast<uchar>(0.5 * (bgra[1] + bgra[2])); // Alpha\n            }\n        }\n    }\n\n    int main(int argc, char **argv)\n    {\n        // Create mat with alpha channel\n        Mat mat(480, 640, CV_8UC4);\n        createAlphaMat(mat);\n\n        vector<int> compression_params;\n        compression_params.push_back(IMWRITE_PNG_COMPRESSION);\n        compression_params.push_back(9);\n\n        try {\n            imwrite(\"alpha.png\", mat, compression_params);\n        }\n        catch (cv::Exception& ex) {\n            fprintf(stderr, \"Exception converting image to PNG format: %s\\n\", ex.what());\n            return 1;\n        }\n\n        fprintf(stdout, \"Saved PNG file with alpha data.\\n\");\n        return 0;\n    }\n@endcode\n@param filename Name of the file.\n@param img Image to be saved.\n@param params Format-specific parameters encoded as pairs (paramId_1, paramValue_1, paramId_2, paramValue_2, ...), see cv::ImwriteFlags.\n*/\nCV_EXPORTS_W bool imwrite( const String& filename, InputArray img,\n              const std::vector<int>& params = std::vector<int>());\n\n/** @brief Reads an image from a buffer in memory.\n\nThe function imdecode reads an image from the specified buffer in memory. If the buffer is too short or\ncontains invalid data, the function returns an empty matrix ( Mat::data==NULL ).\n\nSee cv::imread for the list of supported formats and flags description.\n\n@note In the case of color images, the decoded images will have the channels stored in **B G R** order.\n@param buf Input array or vector of bytes.\n@param flags The same flags as in cv::imread, see cv::ImreadModes.\n*/\nCV_EXPORTS_W Mat imdecode( InputArray buf, int flags );\n\n/** @overload\n@param buf\n@param flags\n@param dst The optional output placeholder for the decoded matrix. It can save the image\nreallocations when the function is called repeatedly for images of the same size.\n*/\nCV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst);\n\n/** @brief Encodes an image into a memory buffer.\n\nThe function imencode compresses the image and stores it in the memory buffer, which is resized to fit the\nresult. See cv::imwrite for the list of supported formats and flags description.\n\n@param ext File extension that defines the output format.\n@param img Image to be written.\n@param buf Output buffer resized to fit the compressed image.\n@param params Format-specific parameters. See cv::imwrite and cv::ImwriteFlags.\n*/\nCV_EXPORTS_W bool imencode( const String& ext, InputArray img,\n                            CV_OUT std::vector<uchar>& buf,\n                            const std::vector<int>& params = std::vector<int>());\n\n//! @} imgcodecs\n\n} // cv\n\n#endif //__OPENCV_IMGCODECS_HPP__\n"
  },
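The imwrite alpha-channel sample above covers the file path; the in-memory path (imencode/imdecode) is only described in prose. Here is a minimal sketch using the same header, with the gradient image an arbitrary stand-in for real data:
@code
// Hedged sketch: encode a Mat to an in-memory PNG and decode it back.
#include <opencv2/imgcodecs.hpp>
#include <cstdio>
#include <vector>

int main()
{
    // Arbitrary single-channel gradient standing in for real image data.
    cv::Mat img(64, 64, CV_8UC1);
    for (int i = 0; i < img.rows; ++i)
        for (int j = 0; j < img.cols; ++j)
            img.at<uchar>(i, j) = static_cast<uchar>(i + j);

    std::vector<uchar> buf;                                   // compressed bytes
    std::vector<int> params = { cv::IMWRITE_PNG_COMPRESSION, 9 };
    if (!cv::imencode(".png", img, buf, params))
        return 1;

    cv::Mat decoded = cv::imdecode(buf, cv::IMREAD_GRAYSCALE);
    std::printf("decoded %dx%d from %zu compressed bytes\n",
                decoded.cols, decoded.rows, buf.size());
    return 0;
}
@endcode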
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgproc/detail/distortion_model.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_IMGPROC_DETAIL_DISTORTION_MODEL_HPP__\n#define __OPENCV_IMGPROC_DETAIL_DISTORTION_MODEL_HPP__\n\n//! 
@cond IGNORED\n\nnamespace cv { namespace detail {\n/**\nComputes the matrix for the projection onto a tilted image sensor\n\\param tauX angular parameter rotation around x-axis\n\\param tauY angular parameter rotation around y-axis\n\\param matTilt if not NULL returns the matrix\n\\f[\n\\vecthreethree{R_{33}(\\tau_x, \\tau_y)}{0}{-R_{13}(\\tau_x, \\tau_y)}\n{0}{R_{33}(\\tau_x, \\tau_y)}{-R_{23}(\\tau_x, \\tau_y)}\n{0}{0}{1} R(\\tau_x, \\tau_y)\n\\f]\nwhere\n\\f[\nR(\\tau_x, \\tau_y) =\n\\vecthreethree{\\cos(\\tau_y)}{0}{-\\sin(\\tau_y)}{0}{1}{0}{\\sin(\\tau_y)}{0}{\\cos(\\tau_y)}\n\\vecthreethree{1}{0}{0}{0}{\\cos(\\tau_x)}{\\sin(\\tau_x)}{0}{-\\sin(\\tau_x)}{\\cos(\\tau_x)} =\n\\vecthreethree{\\cos(\\tau_y)}{\\sin(\\tau_y)\\sin(\\tau_x)}{-\\sin(\\tau_y)\\cos(\\tau_x)}\n{0}{\\cos(\\tau_x)}{\\sin(\\tau_x)}\n{\\sin(\\tau_y)}{-\\cos(\\tau_y)\\sin(\\tau_x)}{\\cos(\\tau_y)\\cos(\\tau_x)}.\n\\f]\n\\param dMatTiltdTauX if not NULL it returns the derivative of matTilt with\nrespect to \\f$\\tau_x\\f$.\n\\param dMatTiltdTauY if not NULL it returns the derivative of matTilt with\nrespect to \\f$\\tau_y\\f$.\n\\param invMatTilt if not NULL it returns the inverse of matTilt\n**/\ntemplate <typename FLOAT>\nvoid computeTiltProjectionMatrix(FLOAT tauX,\n    FLOAT tauY,\n    Matx<FLOAT, 3, 3>* matTilt = 0,\n    Matx<FLOAT, 3, 3>* dMatTiltdTauX = 0,\n    Matx<FLOAT, 3, 3>* dMatTiltdTauY = 0,\n    Matx<FLOAT, 3, 3>* invMatTilt = 0)\n{\n    FLOAT cTauX = cos(tauX);\n    FLOAT sTauX = sin(tauX);\n    FLOAT cTauY = cos(tauY);\n    FLOAT sTauY = sin(tauY);\n    Matx<FLOAT, 3, 3> matRotX = Matx<FLOAT, 3, 3>(1,0,0,0,cTauX,sTauX,0,-sTauX,cTauX);\n    Matx<FLOAT, 3, 3> matRotY = Matx<FLOAT, 3, 3>(cTauY,0,-sTauY,0,1,0,sTauY,0,cTauY);\n    Matx<FLOAT, 3, 3> matRotXY = matRotY * matRotX;\n    Matx<FLOAT, 3, 3> matProjZ = Matx<FLOAT, 3, 3>(matRotXY(2,2),0,-matRotXY(0,2),0,matRotXY(2,2),-matRotXY(1,2),0,0,1);\n    if (matTilt)\n    {\n        // Matrix for trapezoidal distortion of tilted image sensor\n        *matTilt = matProjZ * matRotXY;\n    }\n    if (dMatTiltdTauX)\n    {\n        // Derivative with respect to tauX\n        Matx<FLOAT, 3, 3> dMatRotXYdTauX = matRotY * Matx<FLOAT, 3, 3>(0,0,0,0,-sTauX,cTauX,0,-cTauX,-sTauX);\n        Matx<FLOAT, 3, 3> dMatProjZdTauX = Matx<FLOAT, 3, 3>(dMatRotXYdTauX(2,2),0,-dMatRotXYdTauX(0,2),\n          0,dMatRotXYdTauX(2,2),-dMatRotXYdTauX(1,2),0,0,0);\n        *dMatTiltdTauX = (matProjZ * dMatRotXYdTauX) + (dMatProjZdTauX * matRotXY);\n    }\n    if (dMatTiltdTauY)\n    {\n        // Derivative with respect to tauY\n        Matx<FLOAT, 3, 3> dMatRotXYdTauY = Matx<FLOAT, 3, 3>(-sTauY,0,-cTauY,0,0,0,cTauY,0,-sTauY) * matRotX;\n        Matx<FLOAT, 3, 3> dMatProjZdTauY = Matx<FLOAT, 3, 3>(dMatRotXYdTauY(2,2),0,-dMatRotXYdTauY(0,2),\n          0,dMatRotXYdTauY(2,2),-dMatRotXYdTauY(1,2),0,0,0);\n        *dMatTiltdTauY = (matProjZ * dMatRotXYdTauY) + (dMatProjZdTauY * matRotXY);\n    }\n    if (invMatTilt)\n    {\n        FLOAT inv = 1./matRotXY(2,2);\n        Matx<FLOAT, 3, 3> invMatProjZ = Matx<FLOAT, 3, 3>(inv,0,inv*matRotXY(0,2),0,inv,inv*matRotXY(1,2),0,0,1);\n        *invMatTilt = matRotXY.t()*invMatProjZ;\n    }\n}\n}} // namespace detail, cv\n\n\n//! @endcond\n\n#endif // __OPENCV_IMGPROC_DETAIL_DISTORTION_MODEL_HPP__\n"
  },
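computeTiltProjectionMatrix lives in cv::detail and is an internal helper, but the algebra above implies that invMatTilt is an exact inverse of matTilt: the projective factor cancels and R^T R = I for the rotation. A small sanity-check sketch, assuming this detail header is reachable on the include path:
@code
// Hedged sketch: invMatTilt composed with matTilt should be (near) identity.
#include <opencv2/core.hpp>
#include <opencv2/imgproc/detail/distortion_model.hpp>
#include <cstdio>

int main()
{
    cv::Matx33d matTilt, invMatTilt;
    // Small sensor-tilt angles in radians; the derivative outputs are skipped.
    cv::detail::computeTiltProjectionMatrix<double>(0.01, -0.02, &matTilt,
                                                    0, 0, &invMatTilt);

    cv::Matx33d prod = invMatTilt * matTilt;   // expected: identity matrix
    std::printf("diag = %.6f %.6f %.6f\n", prod(0, 0), prod(1, 1), prod(2, 2));
    return 0;
}
@endcode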
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgproc/imgproc.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/imgproc.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgproc/imgproc_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__\n#define __OPENCV_IMGPROC_IMGPROC_C_H__\n\n#include \"opencv2/imgproc/types_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/** @addtogroup imgproc_c\n@{\n*/\n\n/*********************** Background statistics accumulation *****************************/\n\n/** @brief Adds image to accumulator\n@see cv::accumulate\n*/\nCVAPI(void)  cvAcc( const CvArr* image, CvArr* sum,\n                   const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @brief Adds squared image to accumulator\n@see cv::accumulateSquare\n*/\nCVAPI(void)  cvSquareAcc( const CvArr* image, CvArr* sqsum,\n                         const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @brief Adds a product of two images to accumulator\n@see cv::accumulateProduct\n*/\nCVAPI(void)  cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc,\n                           const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @brief Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha\n@see cv::accumulateWeighted\n*/\nCVAPI(void)  cvRunningAvg( const CvArr* image, CvArr* acc, double alpha,\n                          const CvArr* mask CV_DEFAULT(NULL) );\n\n/****************************************************************************************\\\n*                                    Image Processing            
                        *\n\\****************************************************************************************/\n\n/** Copies source 2D array inside of the larger destination array and\n   makes a border of the specified type (IPL_BORDER_*) around the copied area. */\nCVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset,\n                              int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0)));\n\n/** @brief Smooths the image in one of several ways.\n\n@param src The source image\n@param dst The destination image\n@param smoothtype Type of the smoothing, see SmoothMethod_c\n@param size1 The first parameter of the smoothing operation, the aperture width. Must be a\npositive odd number (1, 3, 5, ...)\n@param size2 The second parameter of the smoothing operation, the aperture height. Ignored by\nCV_MEDIAN and CV_BILATERAL methods. In the case of simple scaled/non-scaled and Gaussian blur if\nsize2 is zero, it is set to size1. Otherwise it must be a positive odd number.\n@param sigma1 In the case of a Gaussian parameter this parameter may specify Gaussian \\f$\\sigma\\f$\n(standard deviation). If it is zero, it is calculated from the kernel size:\n\\f[\\sigma  = 0.3 (n/2 - 1) + 0.8  \\quad   \\text{where}   \\quad  n= \\begin{array}{l l} \\mbox{\\texttt{size1} for horizontal kernel} \\\\ \\mbox{\\texttt{size2} for vertical kernel} \\end{array}\\f]\nUsing standard sigma for small kernels ( \\f$3\\times 3\\f$ to \\f$7\\times 7\\f$ ) gives better speed. If\nsigma1 is not zero, while size1 and size2 are zeros, the kernel size is calculated from the\nsigma (to provide accurate enough operation).\n@param sigma2 additional parameter for bilateral filtering\n\n@see cv::GaussianBlur, cv::blur, cv::medianBlur, cv::bilateralFilter.\n */\nCVAPI(void) cvSmooth( const CvArr* src, CvArr* dst,\n                      int smoothtype CV_DEFAULT(CV_GAUSSIAN),\n                      int size1 CV_DEFAULT(3),\n                      int size2 CV_DEFAULT(0),\n                      double sigma1 CV_DEFAULT(0),\n                      double sigma2 CV_DEFAULT(0));\n\n/** @brief Convolves an image with the kernel.\n\n@param src input image.\n@param dst output image of the same size and the same number of channels as src.\n@param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point\nmatrix; if you want to apply different kernels to different channels, split the image into\nseparate color planes using split and process them individually.\n@param anchor anchor of the kernel that indicates the relative position of a filtered point within\nthe kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor\nis at the kernel center.\n\n@see cv::filter2D\n */\nCVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel,\n                        CvPoint anchor CV_DEFAULT(cvPoint(-1,-1)));\n\n/** @brief Finds integral image: SUM(X,Y) = sum(x<X,y<Y)I(x,y)\n@see cv::integral\n*/\nCVAPI(void) cvIntegral( const CvArr* image, CvArr* sum,\n                       CvArr* sqsum CV_DEFAULT(NULL),\n                       CvArr* tilted_sum CV_DEFAULT(NULL));\n\n/** @brief Smoothes the input image with gaussian kernel and then down-samples it.\n\n   dst_width = floor(src_width/2)[+1],\n   dst_height = floor(src_height/2)[+1]\n   @see cv::pyrDown\n*/\nCVAPI(void)  cvPyrDown( const CvArr* src, CvArr* dst,\n                        int filter CV_DEFAULT(CV_GAUSSIAN_5x5) );\n\n/** @brief Up-samples image and smoothes the 
result with gaussian kernel.\n\n   dst_width = src_width*2,\n   dst_height = src_height*2\n   @see cv::pyrUp\n*/\nCVAPI(void)  cvPyrUp( const CvArr* src, CvArr* dst,\n                      int filter CV_DEFAULT(CV_GAUSSIAN_5x5) );\n\n/** @brief Builds pyramid for an image\n@see buildPyramid\n*/\nCVAPI(CvMat**) cvCreatePyramid( const CvArr* img, int extra_layers, double rate,\n                                const CvSize* layer_sizes CV_DEFAULT(0),\n                                CvArr* bufarr CV_DEFAULT(0),\n                                int calc CV_DEFAULT(1),\n                                int filter CV_DEFAULT(CV_GAUSSIAN_5x5) );\n\n/** @brief Releases pyramid */\nCVAPI(void)  cvReleasePyramid( CvMat*** pyramid, int extra_layers );\n\n\n/** @brief Filters image using meanshift algorithm\n@see cv::pyrMeanShiftFiltering\n*/\nCVAPI(void) cvPyrMeanShiftFiltering( const CvArr* src, CvArr* dst,\n    double sp, double sr, int max_level CV_DEFAULT(1),\n    CvTermCriteria termcrit CV_DEFAULT(cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1)));\n\n/** @brief Segments image using seed \"markers\"\n@see cv::watershed\n*/\nCVAPI(void) cvWatershed( const CvArr* image, CvArr* markers );\n\n/** @brief Calculates an image derivative using generalized Sobel\n\n   (aperture_size = 1,3,5,7) or Scharr (aperture_size = -1) operator.\n   Scharr can be used only for the first dx or dy derivative\n@see cv::Sobel\n*/\nCVAPI(void) cvSobel( const CvArr* src, CvArr* dst,\n                    int xorder, int yorder,\n                    int aperture_size CV_DEFAULT(3));\n\n/** @brief Calculates the image Laplacian: (d2/dx + d2/dy)I\n@see cv::Laplacian\n*/\nCVAPI(void) cvLaplace( const CvArr* src, CvArr* dst,\n                      int aperture_size CV_DEFAULT(3) );\n\n/** @brief Converts input array pixels from one color space to another\n@see cv::cvtColor\n*/\nCVAPI(void)  cvCvtColor( const CvArr* src, CvArr* dst, int code );\n\n\n/** @brief Resizes image (input array is resized to fit the destination array)\n@see cv::resize\n*/\nCVAPI(void)  cvResize( const CvArr* src, CvArr* dst,\n                       int interpolation CV_DEFAULT( CV_INTER_LINEAR ));\n\n/** @brief Warps image with affine transform\n@note ::cvGetQuadrangleSubPix is similar to ::cvWarpAffine, but the outliers are extrapolated using\nreplication border mode.\n@see cv::warpAffine\n*/\nCVAPI(void)  cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* map_matrix,\n                           int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS),\n                           CvScalar fillval CV_DEFAULT(cvScalarAll(0)) );\n\n/** @brief Computes affine transform matrix for mapping src[i] to dst[i] (i=0,1,2)\n@see cv::getAffineTransform\n*/\nCVAPI(CvMat*) cvGetAffineTransform( const CvPoint2D32f * src,\n                                    const CvPoint2D32f * dst,\n                                    CvMat * map_matrix );\n\n/** @brief Computes rotation_matrix matrix\n@see cv::getRotationMatrix2D\n*/\nCVAPI(CvMat*)  cv2DRotationMatrix( CvPoint2D32f center, double angle,\n                                   double scale, CvMat* map_matrix );\n\n/** @brief Warps image with perspective (projective) transform\n@see cv::warpPerspective\n*/\nCVAPI(void)  cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* map_matrix,\n                                int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS),\n                                CvScalar fillval CV_DEFAULT(cvScalarAll(0)) );\n\n/** @brief Computes perspective transform 
matrix for mapping src[i] to dst[i] (i=0,1,2,3)\n@see cv::getPerspectiveTransform\n*/\nCVAPI(CvMat*) cvGetPerspectiveTransform( const CvPoint2D32f* src,\n                                         const CvPoint2D32f* dst,\n                                         CvMat* map_matrix );\n\n/** @brief Performs generic geometric transformation using the specified coordinate maps\n@see cv::remap\n*/\nCVAPI(void)  cvRemap( const CvArr* src, CvArr* dst,\n                      const CvArr* mapx, const CvArr* mapy,\n                      int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS),\n                      CvScalar fillval CV_DEFAULT(cvScalarAll(0)) );\n\n/** @brief Converts mapx & mapy from floating-point to integer formats for cvRemap\n@see cv::convertMaps\n*/\nCVAPI(void)  cvConvertMaps( const CvArr* mapx, const CvArr* mapy,\n                            CvArr* mapxy, CvArr* mapalpha );\n\n/** @brief Performs forward or inverse log-polar image transform\n@see cv::logPolar\n*/\nCVAPI(void)  cvLogPolar( const CvArr* src, CvArr* dst,\n                         CvPoint2D32f center, double M,\n                         int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS));\n\n/** Performs forward or inverse linear-polar image transform\n@see cv::linearPolar\n*/\nCVAPI(void)  cvLinearPolar( const CvArr* src, CvArr* dst,\n                         CvPoint2D32f center, double maxRadius,\n                         int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS));\n\n/** @brief Transforms the input image to compensate lens distortion\n@see cv::undistort\n*/\nCVAPI(void) cvUndistort2( const CvArr* src, CvArr* dst,\n                          const CvMat* camera_matrix,\n                          const CvMat* distortion_coeffs,\n                          const CvMat* new_camera_matrix CV_DEFAULT(0) );\n\n/** @brief Computes transformation map from intrinsic camera parameters\n   that can used by cvRemap\n*/\nCVAPI(void) cvInitUndistortMap( const CvMat* camera_matrix,\n                                const CvMat* distortion_coeffs,\n                                CvArr* mapx, CvArr* mapy );\n\n/** @brief Computes undistortion+rectification map for a head of stereo camera\n@see cv::initUndistortRectifyMap\n*/\nCVAPI(void) cvInitUndistortRectifyMap( const CvMat* camera_matrix,\n                                       const CvMat* dist_coeffs,\n                                       const CvMat *R, const CvMat* new_camera_matrix,\n                                       CvArr* mapx, CvArr* mapy );\n\n/** @brief Computes the original (undistorted) feature coordinates\n   from the observed (distorted) coordinates\n@see cv::undistortPoints\n*/\nCVAPI(void) cvUndistortPoints( const CvMat* src, CvMat* dst,\n                               const CvMat* camera_matrix,\n                               const CvMat* dist_coeffs,\n                               const CvMat* R CV_DEFAULT(0),\n                               const CvMat* P CV_DEFAULT(0));\n\n/** @brief Returns a structuring element of the specified size and shape for morphological operations.\n\n@note the created structuring element IplConvKernel\\* element must be released in the end using\n`cvReleaseStructuringElement(&element)`.\n\n@param cols Width of the structuring element\n@param rows Height of the structuring element\n@param anchor_x x-coordinate of the anchor\n@param anchor_y y-coordinate of the anchor\n@param shape element shape that could be one of the cv::MorphShapes_c\n@param values integer array of cols*rows elements that 
specifies the custom shape of the\nstructuring element, when shape=CV_SHAPE_CUSTOM.\n\n@see cv::getStructuringElement\n */\n CVAPI(IplConvKernel*)  cvCreateStructuringElementEx(\n            int cols, int  rows, int  anchor_x, int  anchor_y,\n            int shape, int* values CV_DEFAULT(NULL) );\n\n/** @brief releases structuring element\n@see cvCreateStructuringElementEx\n*/\nCVAPI(void)  cvReleaseStructuringElement( IplConvKernel** element );\n\n/** @brief erodes input image (applies minimum filter) one or more times.\n   If element pointer is NULL, 3x3 rectangular element is used\n@see cv::erode\n*/\nCVAPI(void)  cvErode( const CvArr* src, CvArr* dst,\n                      IplConvKernel* element CV_DEFAULT(NULL),\n                      int iterations CV_DEFAULT(1) );\n\n/** @brief dilates input image (applies maximum filter) one or more times.\n\n   If element pointer is NULL, 3x3 rectangular element is used\n@see cv::dilate\n*/\nCVAPI(void)  cvDilate( const CvArr* src, CvArr* dst,\n                       IplConvKernel* element CV_DEFAULT(NULL),\n                       int iterations CV_DEFAULT(1) );\n\n/** @brief Performs complex morphological transformation\n@see cv::morphologyEx\n*/\nCVAPI(void)  cvMorphologyEx( const CvArr* src, CvArr* dst,\n                             CvArr* temp, IplConvKernel* element,\n                             int operation, int iterations CV_DEFAULT(1) );\n\n/** @brief Calculates all spatial and central moments up to the 3rd order\n@see cv::moments\n*/\nCVAPI(void) cvMoments( const CvArr* arr, CvMoments* moments, int binary CV_DEFAULT(0));\n\n/** @brief Retrieve spatial moments */\nCVAPI(double)  cvGetSpatialMoment( CvMoments* moments, int x_order, int y_order );\n/** @brief Retrieve central moments */\nCVAPI(double)  cvGetCentralMoment( CvMoments* moments, int x_order, int y_order );\n/** @brief Retrieve normalized central moments */\nCVAPI(double)  cvGetNormalizedCentralMoment( CvMoments* moments,\n                                             int x_order, int y_order );\n\n/** @brief Calculates 7 Hu's invariants from precalculated spatial and central moments\n@see cv::HuMoments\n*/\nCVAPI(void) cvGetHuMoments( CvMoments*  moments, CvHuMoments*  hu_moments );\n\n/*********************************** data sampling **************************************/\n\n/** @brief Fetches pixels that belong to the specified line segment and stores them to the buffer.\n\n   Returns the number of retrieved points.\n@see cv::LineSegmentDetector\n*/\nCVAPI(int)  cvSampleLine( const CvArr* image, CvPoint pt1, CvPoint pt2, void* buffer,\n                          int connectivity CV_DEFAULT(8));\n\n/** @brief Retrieves the rectangular image region with specified center from the input array.\n\n dst(x,y) <- src(x + center.x - dst_width/2, y + center.y - dst_height/2).\n Values of pixels with fractional coordinates are retrieved using bilinear interpolation\n@see cv::getRectSubPix\n*/\nCVAPI(void)  cvGetRectSubPix( const CvArr* src, CvArr* dst, CvPoint2D32f center );\n\n\n/** @brief Retrieves quadrangle from the input array.\n\n    matrixarr = ( a11  a12 | b1 )   dst(x,y) <- src(A[x y]' + b)\n                ( a21  a22 | b2 )   (bilinear interpolation is used to retrieve pixels\n                                     with fractional coordinates)\n@see cvWarpAffine\n*/\nCVAPI(void)  cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst,\n                                    const CvMat* map_matrix );\n\n/** @brief Measures similarity between template and overlapped windows in the 
source image\n   and fills the resultant image with the measurements\n@see cv::matchTemplate\n*/\nCVAPI(void)  cvMatchTemplate( const CvArr* image, const CvArr* templ,\n                              CvArr* result, int method );\n\n/** @brief Computes earth mover distance between\n   two weighted point sets (called signatures)\n@see cv::EMD\n*/\nCVAPI(float)  cvCalcEMD2( const CvArr* signature1,\n                          const CvArr* signature2,\n                          int distance_type,\n                          CvDistanceFunction distance_func CV_DEFAULT(NULL),\n                          const CvArr* cost_matrix CV_DEFAULT(NULL),\n                          CvArr* flow CV_DEFAULT(NULL),\n                          float* lower_bound CV_DEFAULT(NULL),\n                          void* userdata CV_DEFAULT(NULL));\n\n/****************************************************************************************\\\n*                              Contours retrieving                                       *\n\\****************************************************************************************/\n\n/** @brief Retrieves outer and optionally inner boundaries of white (non-zero) connected\n   components in the black (zero) background\n@see cv::findContours, cvStartFindContours, cvFindNextContour, cvSubstituteContour, cvEndFindContours\n*/\nCVAPI(int)  cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour,\n                            int header_size CV_DEFAULT(sizeof(CvContour)),\n                            int mode CV_DEFAULT(CV_RETR_LIST),\n                            int method CV_DEFAULT(CV_CHAIN_APPROX_SIMPLE),\n                            CvPoint offset CV_DEFAULT(cvPoint(0,0)));\n\n/** @brief Initializes contour retrieving process.\n\n   Calls cvStartFindContours.\n   Calls cvFindNextContour until null pointer is returned\n   or some other condition becomes true.\n   Calls cvEndFindContours at the end.\n@see cvFindContours\n*/\nCVAPI(CvContourScanner)  cvStartFindContours( CvArr* image, CvMemStorage* storage,\n                            int header_size CV_DEFAULT(sizeof(CvContour)),\n                            int mode CV_DEFAULT(CV_RETR_LIST),\n                            int method CV_DEFAULT(CV_CHAIN_APPROX_SIMPLE),\n                            CvPoint offset CV_DEFAULT(cvPoint(0,0)));\n\n/** @brief Retrieves next contour\n@see cvFindContours\n*/\nCVAPI(CvSeq*)  cvFindNextContour( CvContourScanner scanner );\n\n\n/** @brief Substitutes the last retrieved contour with the new one\n\n   (if the substitutor is null, the last retrieved contour is removed from the tree)\n@see cvFindContours\n*/\nCVAPI(void)   cvSubstituteContour( CvContourScanner scanner, CvSeq* new_contour );\n\n\n/** @brief Releases contour scanner and returns pointer to the first outer contour\n@see cvFindContours\n*/\nCVAPI(CvSeq*)  cvEndFindContours( CvContourScanner* scanner );\n\n/** @brief Approximates Freeman chain(s) with a polygonal curve.\n\nThis is a standalone contour approximation routine, not represented in the new interface. 
When\ncvFindContours retrieves contours as Freeman chains, it calls the function to get approximated\ncontours, represented as polygons.\n\n@param src_seq Pointer to the approximated Freeman chain that can refer to other chains.\n@param storage Storage location for the resulting polylines.\n@param method Approximation method (see the description of the function :ocvFindContours ).\n@param parameter Method parameter (not used now).\n@param minimal_perimeter Approximates only those contours whose perimeters are not less than\nminimal_perimeter . Other chains are removed from the resulting structure.\n@param recursive Recursion flag. If it is non-zero, the function approximates all chains that can\nbe obtained from chain by using the h_next or v_next links. Otherwise, the single input chain is\napproximated.\n@see cvStartReadChainPoints, cvReadChainPoint\n */\nCVAPI(CvSeq*) cvApproxChains( CvSeq* src_seq, CvMemStorage* storage,\n                            int method CV_DEFAULT(CV_CHAIN_APPROX_SIMPLE),\n                            double parameter CV_DEFAULT(0),\n                            int  minimal_perimeter CV_DEFAULT(0),\n                            int  recursive CV_DEFAULT(0));\n\n/** @brief Initializes Freeman chain reader.\n\n   The reader is used to iteratively get coordinates of all the chain points.\n   If the Freeman codes should be read as is, a simple sequence reader should be used\n@see cvApproxChains\n*/\nCVAPI(void) cvStartReadChainPoints( CvChain* chain, CvChainPtReader* reader );\n\n/** @brief Retrieves the next chain point\n@see cvApproxChains\n*/\nCVAPI(CvPoint) cvReadChainPoint( CvChainPtReader* reader );\n\n\n/****************************************************************************************\\\n*                            Contour Processing and Shape Analysis                       *\n\\****************************************************************************************/\n\n/** @brief Approximates a single polygonal curve (contour) or\n   a tree of polygonal curves (contours)\n@see cv::approxPolyDP\n*/\nCVAPI(CvSeq*)  cvApproxPoly( const void* src_seq,\n                             int header_size, CvMemStorage* storage,\n                             int method, double eps,\n                             int recursive CV_DEFAULT(0));\n\n/** @brief Calculates perimeter of a contour or length of a part of contour\n@see cv::arcLength\n*/\nCVAPI(double)  cvArcLength( const void* curve,\n                            CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ),\n                            int is_closed CV_DEFAULT(-1));\n\n/** same as cvArcLength for closed contour\n*/\nCV_INLINE double cvContourPerimeter( const void* contour )\n{\n    return cvArcLength( contour, CV_WHOLE_SEQ, 1 );\n}\n\n\n/** @brief Calculates contour bounding rectangle (update=1) or\n   just retrieves pre-calculated rectangle (update=0)\n@see cv::boundingRect\n*/\nCVAPI(CvRect)  cvBoundingRect( CvArr* points, int update CV_DEFAULT(0) );\n\n/** @brief Calculates area of a contour or contour segment\n@see cv::contourArea\n*/\nCVAPI(double)  cvContourArea( const CvArr* contour,\n                              CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ),\n                              int oriented CV_DEFAULT(0));\n\n/** @brief Finds minimum area rotated rectangle bounding a set of points\n@see cv::minAreaRect\n*/\nCVAPI(CvBox2D)  cvMinAreaRect2( const CvArr* points,\n                                CvMemStorage* storage CV_DEFAULT(NULL));\n\n/** @brief Finds minimum enclosing circle for a set of points\n@see 
cv::minEnclosingCircle\n*/\nCVAPI(int)  cvMinEnclosingCircle( const CvArr* points,\n                                  CvPoint2D32f* center, float* radius );\n\n/** @brief Compares two contours by matching their moments\n@see cv::matchShapes\n*/\nCVAPI(double)  cvMatchShapes( const void* object1, const void* object2,\n                              int method, double parameter CV_DEFAULT(0));\n\n/** @brief Calculates exact convex hull of 2d point set\n@see cv::convexHull\n*/\nCVAPI(CvSeq*) cvConvexHull2( const CvArr* input,\n                             void* hull_storage CV_DEFAULT(NULL),\n                             int orientation CV_DEFAULT(CV_CLOCKWISE),\n                             int return_points CV_DEFAULT(0));\n\n/** @brief Checks whether the contour is convex or not (returns 1 if convex, 0 if not)\n@see cv::isContourConvex\n*/\nCVAPI(int)  cvCheckContourConvexity( const CvArr* contour );\n\n\n/** @brief Finds convexity defects for the contour\n@see cv::convexityDefects\n*/\nCVAPI(CvSeq*)  cvConvexityDefects( const CvArr* contour, const CvArr* convexhull,\n                                   CvMemStorage* storage CV_DEFAULT(NULL));\n\n/** @brief Fits ellipse into a set of 2d points\n@see cv::fitEllipse\n*/\nCVAPI(CvBox2D) cvFitEllipse2( const CvArr* points );\n\n/** @brief Finds minimum rectangle containing two given rectangles */\nCVAPI(CvRect)  cvMaxRect( const CvRect* rect1, const CvRect* rect2 );\n\n/** @brief Finds coordinates of the box vertices */\nCVAPI(void) cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] );\n\n/** @brief Initializes sequence header for a matrix (column or row vector) of points\n\n   a wrapper for cvMakeSeqHeaderForArray (it does not initialize bounding rectangle!!!) */\nCVAPI(CvSeq*) cvPointSeqFromMat( int seq_kind, const CvArr* mat,\n                                 CvContour* contour_header,\n                                 CvSeqBlock* block );\n\n/** @brief Checks whether the point is inside polygon, outside, on an edge (at a vertex).\n\n   Returns positive, negative or zero value, correspondingly.\n   Optionally, measures a signed distance between\n   the point and the nearest polygon edge (measure_dist=1)\n@see cv::pointPolygonTest\n*/\nCVAPI(double) cvPointPolygonTest( const CvArr* contour,\n                                  CvPoint2D32f pt, int measure_dist );\n\n/****************************************************************************************\\\n*                                  Histogram functions                                   *\n\\****************************************************************************************/\n\n/** @brief Creates a histogram.\n\nThe function creates a histogram of the specified size and returns a pointer to the created\nhistogram. If the array ranges is 0, the histogram bin ranges must be specified later via the\nfunction cvSetHistBinRanges. Though cvCalcHist and cvCalcBackProject may process 8-bit images\nwithout setting bin ranges, they assume they are equally spaced in 0 to 255 bins.\n\n@param dims Number of histogram dimensions.\n@param sizes Array of the histogram dimension sizes.\n@param type Histogram representation format. CV_HIST_ARRAY means that the histogram data is\nrepresented as a multi-dimensional dense array CvMatND. CV_HIST_SPARSE means that histogram data\nis represented as a multi-dimensional sparse array CvSparseMat.\n@param ranges Array of ranges for the histogram bins. Its meaning depends on the uniform parameter\nvalue. 
The ranges are used when the histogram is calculated or backprojected to determine which\nhistogram bin corresponds to which value/tuple of values from the input image(s).\n@param uniform Uniformity flag. If not zero, the histogram has evenly spaced bins and for every\n\\f$0<=i<cDims\\f$ ranges[i] is an array of two numbers: lower and upper boundaries for the i-th\nhistogram dimension. The whole range [lower,upper] is then split into dims[i] equal parts to\ndetermine the i-th input tuple value ranges for every histogram bin. And if uniform=0 , then the\ni-th element of the ranges array contains dims[i]+1 elements: \\f$\\texttt{lower}_0,\n\\texttt{upper}_0, \\texttt{lower}_1, \\texttt{upper}_1 = \\texttt{lower}_2,\n...\n\\texttt{upper}_{dims[i]-1}\\f$ where \\f$\\texttt{lower}_j\\f$ and \\f$\\texttt{upper}_j\\f$ are lower\nand upper boundaries of the i-th input tuple value for the j-th bin, respectively. In either\ncase, the input values that are beyond the specified range for a histogram bin are not counted\nby cvCalcHist and filled with 0 by cvCalcBackProject.\n */\nCVAPI(CvHistogram*)  cvCreateHist( int dims, int* sizes, int type,\n                                   float** ranges CV_DEFAULT(NULL),\n                                   int uniform CV_DEFAULT(1));\n\n/** @brief Sets the bounds of the histogram bins.\n\nThis is a standalone function for setting bin ranges in the histogram. For a more detailed\ndescription of the parameters ranges and uniform, see the :ocvCalcHist function that can initialize\nthe ranges as well. Ranges for the histogram bins must be set before the histogram is calculated or\nthe backproject of the histogram is calculated.\n\n@param hist Histogram.\n@param ranges Array of bin ranges arrays. See :ocvCreateHist for details.\n@param uniform Uniformity flag. See :ocvCreateHist for details.\n */\nCVAPI(void)  cvSetHistBinRanges( CvHistogram* hist, float** ranges,\n                                int uniform CV_DEFAULT(1));\n\n/** @brief Makes a histogram out of an array.\n\nThe function initializes the histogram, whose header and bins are allocated by the user.\ncvReleaseHist does not need to be called afterwards. Only dense histograms can be initialized this\nway. The function returns hist.\n\n@param dims Number of the histogram dimensions.\n@param sizes Array of the histogram dimension sizes.\n@param hist Histogram header initialized by the function.\n@param data Array used to store histogram bins.\n@param ranges Histogram bin ranges. See cvCreateHist for details.\n@param uniform Uniformity flag. See cvCreateHist for details.\n */\nCVAPI(CvHistogram*)  cvMakeHistHeaderForArray(\n                            int  dims, int* sizes, CvHistogram* hist,\n                            float* data, float** ranges CV_DEFAULT(NULL),\n                            int uniform CV_DEFAULT(1));\n\n/** @brief Releases the histogram.\n\nThe function releases the histogram (header and the data). The pointer to the histogram is cleared\nby the function. 
If \\*hist pointer is already NULL, the function does nothing.\n\n@param hist Double pointer to the released histogram.\n */\nCVAPI(void)  cvReleaseHist( CvHistogram** hist );\n\n/** @brief Clears the histogram.\n\nThe function sets all of the histogram bins to 0 in case of a dense histogram and removes all\nhistogram bins in case of a sparse array.\n\n@param hist Histogram.\n */\nCVAPI(void)  cvClearHist( CvHistogram* hist );\n\n/** @brief Finds the minimum and maximum histogram bins.\n\nThe function finds the minimum and maximum histogram bins and their positions. All of output\narguments are optional. Among several extremas with the same value the ones with the minimum index\n(in the lexicographical order) are returned. In case of several maximums or minimums, the earliest\nin the lexicographical order (extrema locations) is returned.\n\n@param hist Histogram.\n@param min_value Pointer to the minimum value of the histogram.\n@param max_value Pointer to the maximum value of the histogram.\n@param min_idx Pointer to the array of coordinates for the minimum.\n@param max_idx Pointer to the array of coordinates for the maximum.\n */\nCVAPI(void)  cvGetMinMaxHistValue( const CvHistogram* hist,\n                                   float* min_value, float* max_value,\n                                   int* min_idx CV_DEFAULT(NULL),\n                                   int* max_idx CV_DEFAULT(NULL));\n\n\n/** @brief Normalizes the histogram.\n\nThe function normalizes the histogram bins by scaling them so that the sum of the bins becomes equal\nto factor.\n\n@param hist Pointer to the histogram.\n@param factor Normalization factor.\n */\nCVAPI(void)  cvNormalizeHist( CvHistogram* hist, double factor );\n\n\n/** @brief Thresholds the histogram.\n\nThe function clears histogram bins that are below the specified threshold.\n\n@param hist Pointer to the histogram.\n@param threshold Threshold level.\n */\nCVAPI(void)  cvThreshHist( CvHistogram* hist, double threshold );\n\n\n/** Compares two histogram */\nCVAPI(double)  cvCompareHist( const CvHistogram* hist1,\n                              const CvHistogram* hist2,\n                              int method);\n\n/** @brief Copies a histogram.\n\nThe function makes a copy of the histogram. If the second histogram pointer \\*dst is NULL, a new\nhistogram of the same size as src is created. Otherwise, both histograms must have equal types and\nsizes. 
Then the function copies the bin values of the source histogram to the destination histogram\nand sets the same bin value ranges as in src.\n\n@param src Source histogram.\n@param dst Pointer to the destination histogram.\n */\nCVAPI(void)  cvCopyHist( const CvHistogram* src, CvHistogram** dst );\n\n\n/** @brief Calculates bayesian probabilistic histograms\n   (each or src and dst is an array of _number_ histograms */\nCVAPI(void)  cvCalcBayesianProb( CvHistogram** src, int number,\n                                CvHistogram** dst);\n\n/** @brief Calculates array histogram\n@see cv::calcHist\n*/\nCVAPI(void)  cvCalcArrHist( CvArr** arr, CvHistogram* hist,\n                            int accumulate CV_DEFAULT(0),\n                            const CvArr* mask CV_DEFAULT(NULL) );\n\n/** @overload */\nCV_INLINE  void  cvCalcHist( IplImage** image, CvHistogram* hist,\n                             int accumulate CV_DEFAULT(0),\n                             const CvArr* mask CV_DEFAULT(NULL) )\n{\n    cvCalcArrHist( (CvArr**)image, hist, accumulate, mask );\n}\n\n/** @brief Calculates back project\n@see cvCalcBackProject, cv::calcBackProject\n*/\nCVAPI(void)  cvCalcArrBackProject( CvArr** image, CvArr* dst,\n                                   const CvHistogram* hist );\n\n#define  cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist)\n\n\n/** @brief Locates a template within an image by using a histogram comparison.\n\nThe function calculates the back projection by comparing histograms of the source image patches with\nthe given histogram. The function is similar to matchTemplate, but instead of comparing the raster\npatch with all its possible positions within the search window, the function CalcBackProjectPatch\ncompares histograms. See the algorithm diagram below:\n\n![image](pics/backprojectpatch.png)\n\n@param image Source images (though, you may pass CvMat\\*\\* as well).\n@param dst Destination image.\n@param range\n@param hist Histogram.\n@param method Comparison method passed to cvCompareHist (see the function description).\n@param factor Normalization factor for histograms that affects the normalization scale of the\ndestination image. 
Pass 1 if not sure.\n\n@see cvCalcBackProjectPatch\n */\nCVAPI(void)  cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range,\n                                        CvHistogram* hist, int method,\n                                        double factor );\n\n#define  cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \\\n     cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor )\n\n\n/** @brief Divides one histogram by another.\n\nThe function calculates the object probability density from two histograms as:\n\n\\f[\\texttt{disthist} (I)= \\forkthree{0}{if \\(\\texttt{hist1}(I)=0\\)}{\\texttt{scale}}{if \\(\\texttt{hist1}(I) \\ne 0\\) and \\(\\texttt{hist2}(I) > \\texttt{hist1}(I)\\)}{\\frac{\\texttt{hist2}(I) \\cdot \\texttt{scale}}{\\texttt{hist1}(I)}}{if \\(\\texttt{hist1}(I) \\ne 0\\) and \\(\\texttt{hist2}(I) \\le \\texttt{hist1}(I)\\)}\\f]\n\n@param hist1 First histogram (the divisor).\n@param hist2 Second histogram.\n@param dst_hist Destination histogram.\n@param scale Scale factor for the destination histogram.\n */\nCVAPI(void)  cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2,\n                                CvHistogram* dst_hist, double scale CV_DEFAULT(255) );\n\n/** @brief equalizes histogram of 8-bit single-channel image\n@see cv::equalizeHist\n*/\nCVAPI(void)  cvEqualizeHist( const CvArr* src, CvArr* dst );\n\n\n/** @brief Applies distance transform to binary image\n@see cv::distanceTransform\n*/\nCVAPI(void)  cvDistTransform( const CvArr* src, CvArr* dst,\n                              int distance_type CV_DEFAULT(CV_DIST_L2),\n                              int mask_size CV_DEFAULT(3),\n                              const float* mask CV_DEFAULT(NULL),\n                              CvArr* labels CV_DEFAULT(NULL),\n                              int labelType CV_DEFAULT(CV_DIST_LABEL_CCOMP));\n\n\n/** @brief Applies fixed-level threshold to grayscale image.\n\n   This is a basic operation applied before retrieving contours\n@see cv::threshold\n*/\nCVAPI(double)  cvThreshold( const CvArr*  src, CvArr*  dst,\n                            double  threshold, double  max_value,\n                            int threshold_type );\n\n/** @brief Applies adaptive threshold to grayscale image.\n\n   The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and\n   CV_ADAPTIVE_THRESH_GAUSSIAN_C are:\n   neighborhood size (3, 5, 7 etc.),\n   and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...)\n@see cv::adaptiveThreshold\n*/\nCVAPI(void)  cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value,\n                                  int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C),\n                                  int threshold_type CV_DEFAULT(CV_THRESH_BINARY),\n                                  int block_size CV_DEFAULT(3),\n                                  double param1 CV_DEFAULT(5));\n\n/** @brief Fills the connected component until the color difference gets large enough\n@see cv::floodFill\n*/\nCVAPI(void)  cvFloodFill( CvArr* image, CvPoint seed_point,\n                          CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)),\n                          CvScalar up_diff CV_DEFAULT(cvScalarAll(0)),\n                          CvConnectedComp* comp CV_DEFAULT(NULL),\n                          int flags CV_DEFAULT(4),\n                          CvArr* mask CV_DEFAULT(NULL));\n\n/****************************************************************************************\\\n*   
                               Feature detection                                     *\n\\****************************************************************************************/\n\n/** @brief Runs canny edge detector\n@see cv::Canny\n*/\nCVAPI(void)  cvCanny( const CvArr* image, CvArr* edges, double threshold1,\n                      double threshold2, int  aperture_size CV_DEFAULT(3) );\n\n/** @brief Calculates constraint image for corner detection\n\n   Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy.\n   Applying threshold to the result gives coordinates of corners\n@see cv::preCornerDetect\n*/\nCVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners,\n                               int aperture_size CV_DEFAULT(3) );\n\n/** @brief Calculates eigen values and vectors of 2x2\n   gradient covariation matrix at every image pixel\n@see cv::cornerEigenValsAndVecs\n*/\nCVAPI(void)  cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv,\n                                       int block_size, int aperture_size CV_DEFAULT(3) );\n\n/** @brief Calculates minimal eigenvalue for 2x2 gradient covariation matrix at\n   every image pixel\n@see cv::cornerMinEigenVal\n*/\nCVAPI(void)  cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval,\n                                  int block_size, int aperture_size CV_DEFAULT(3) );\n\n/** @brief Harris corner detector:\n\n   Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel\n@see cv::cornerHarris\n*/\nCVAPI(void)  cvCornerHarris( const CvArr* image, CvArr* harris_response,\n                             int block_size, int aperture_size CV_DEFAULT(3),\n                             double k CV_DEFAULT(0.04) );\n\n/** @brief Adjust corner position using some sort of gradient search\n@see cv::cornerSubPix\n*/\nCVAPI(void)  cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners,\n                                 int count, CvSize win, CvSize zero_zone,\n                                 CvTermCriteria  criteria );\n\n/** @brief Finds a sparse set of points within the selected region\n   that seem to be easy to track\n@see cv::goodFeaturesToTrack\n*/\nCVAPI(void)  cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image,\n                                    CvArr* temp_image, CvPoint2D32f* corners,\n                                    int* corner_count, double  quality_level,\n                                    double  min_distance,\n                                    const CvArr* mask CV_DEFAULT(NULL),\n                                    int block_size CV_DEFAULT(3),\n                                    int use_harris CV_DEFAULT(0),\n                                    double k CV_DEFAULT(0.04) );\n\n/** @brief Finds lines on binary image using one of several methods.\n\n   line_storage is either memory storage or 1 x _max number of lines_ CvMat, its\n   number of columns is changed by the function.\n   method is one of CV_HOUGH_*;\n   rho, theta and threshold are used for each of those methods;\n   param1 ~ line length, param2 ~ line gap - for probabilistic,\n   param1 ~ srn, param2 ~ stn - for multi-scale\n@see cv::HoughLines\n*/\nCVAPI(CvSeq*)  cvHoughLines2( CvArr* image, void* line_storage, int method,\n                              double rho, double theta, int threshold,\n                              double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0),\n                              double min_theta CV_DEFAULT(0), double max_theta CV_DEFAULT(CV_PI));\n\n/** @brief Finds circles in 
the image\n@see cv::HoughCircles\n*/\nCVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage,\n                              int method, double dp, double min_dist,\n                              double param1 CV_DEFAULT(100),\n                              double param2 CV_DEFAULT(100),\n                              int min_radius CV_DEFAULT(0),\n                              int max_radius CV_DEFAULT(0));\n\n/** @brief Fits a line into set of 2d or 3d points in a robust way (M-estimator technique)\n@see cv::fitLine\n*/\nCVAPI(void)  cvFitLine( const CvArr* points, int dist_type, double param,\n                        double reps, double aeps, float* line );\n\n/****************************************************************************************\\\n*                                     Drawing                                            *\n\\****************************************************************************************/\n\n/****************************************************************************************\\\n*       Drawing functions work with images/matrices of arbitrary type.                   *\n*       For color images the channel order is BGR[A]                                     *\n*       Antialiasing is supported only for 8-bit image now.                              *\n*       All the functions include parameter color that means rgb value (that may be      *\n*       constructed with CV_RGB macro) for color images and brightness                   *\n*       for grayscale images.                                                            *\n*       If a drawn figure is partially or completely outside of the image, it is clipped.*\n\\****************************************************************************************/\n\n#define CV_RGB( r, g, b )  cvScalar( (b), (g), (r), 0 )\n#define CV_FILLED -1\n\n#define CV_AA 16\n\n/** @brief Draws 4-connected, 8-connected or antialiased line segment connecting two points\n@see cv::line\n*/\nCVAPI(void)  cvLine( CvArr* img, CvPoint pt1, CvPoint pt2,\n                     CvScalar color, int thickness CV_DEFAULT(1),\n                     int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );\n\n/** @brief Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2)\n\n   if thickness<0 (e.g. thickness == CV_FILLED), the filled box is drawn\n@see cv::rectangle\n*/\nCVAPI(void)  cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2,\n                          CvScalar color, int thickness CV_DEFAULT(1),\n                          int line_type CV_DEFAULT(8),\n                          int shift CV_DEFAULT(0));\n\n/** @brief Draws a rectangle specified by a CvRect structure\n@see cv::rectangle\n*/\nCVAPI(void)  cvRectangleR( CvArr* img, CvRect r,\n                           CvScalar color, int thickness CV_DEFAULT(1),\n                           int line_type CV_DEFAULT(8),\n                           int shift CV_DEFAULT(0));\n\n\n/** @brief Draws a circle with specified center and radius.\n\n   Thickness works in the same way as with cvRectangle\n@see cv::circle\n*/\nCVAPI(void)  cvCircle( CvArr* img, CvPoint center, int radius,\n                       CvScalar color, int thickness CV_DEFAULT(1),\n                       int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));\n\n/** @brief Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector\n\n   depending on _thickness_, _start_angle_ and _end_angle_ parameters. The resultant figure\n   is rotated by _angle_. 
All the angles are in degrees\n@see cv::ellipse\n*/\nCVAPI(void)  cvEllipse( CvArr* img, CvPoint center, CvSize axes,\n                        double angle, double start_angle, double end_angle,\n                        CvScalar color, int thickness CV_DEFAULT(1),\n                        int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));\n\nCV_INLINE  void  cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color,\n                               int thickness CV_DEFAULT(1),\n                               int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) )\n{\n    CvSize axes;\n    axes.width = cvRound(box.size.width*0.5);\n    axes.height = cvRound(box.size.height*0.5);\n\n    cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle,\n               0, 360, color, thickness, line_type, shift );\n}\n\n/** @brief Fills convex or monotonous polygon.\n@see cv::fillConvexPoly\n*/\nCVAPI(void)  cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color,\n                               int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));\n\n/** @brief Fills an area bounded by one or more arbitrary polygons\n@see cv::fillPoly\n*/\nCVAPI(void)  cvFillPoly( CvArr* img, CvPoint** pts, const int* npts,\n                         int contours, CvScalar color,\n                         int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );\n\n/** @brief Draws one or more polygonal curves\n@see cv::polylines\n*/\nCVAPI(void)  cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours,\n                         int is_closed, CvScalar color, int thickness CV_DEFAULT(1),\n                         int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );\n\n#define cvDrawRect cvRectangle\n#define cvDrawLine cvLine\n#define cvDrawCircle cvCircle\n#define cvDrawEllipse cvEllipse\n#define cvDrawPolyLine cvPolyLine\n\n/** @brief Clips the line segment connecting *pt1 and *pt2\n   by the rectangular window\n\n   (0<=x<img_size.width, 0<=y<img_size.height).\n@see cv::clipLine\n*/\nCVAPI(int) cvClipLine( CvSize img_size, CvPoint* pt1, CvPoint* pt2 );\n\n/** @brief Initializes line iterator.\n\nInitially, line_iterator->ptr will point to pt1 (or pt2, see left_to_right description) location in\nthe image. Returns the number of pixels on the line between the ending points.\n@see cv::LineIterator\n*/\nCVAPI(int)  cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2,\n                                CvLineIterator* line_iterator,\n                                int connectivity CV_DEFAULT(8),\n                                int left_to_right CV_DEFAULT(0));\n\n#define CV_NEXT_LINE_POINT( line_iterator )                     \\\n{                                                               \\\n    int _line_iterator_mask = (line_iterator).err < 0 ? 
-1 : 0; \\\n    (line_iterator).err += (line_iterator).minus_delta +        \\\n        ((line_iterator).plus_delta & _line_iterator_mask);     \\\n    (line_iterator).ptr += (line_iterator).minus_step +         \\\n        ((line_iterator).plus_step & _line_iterator_mask);      \\\n}\n\n\n#define CV_FONT_HERSHEY_SIMPLEX         0\n#define CV_FONT_HERSHEY_PLAIN           1\n#define CV_FONT_HERSHEY_DUPLEX          2\n#define CV_FONT_HERSHEY_COMPLEX         3\n#define CV_FONT_HERSHEY_TRIPLEX         4\n#define CV_FONT_HERSHEY_COMPLEX_SMALL   5\n#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX  6\n#define CV_FONT_HERSHEY_SCRIPT_COMPLEX  7\n\n#define CV_FONT_ITALIC                 16\n\n#define CV_FONT_VECTOR0    CV_FONT_HERSHEY_SIMPLEX\n\n\n/** Font structure */\ntypedef struct CvFont\n{\n  const char* nameFont;   //Qt:nameFont\n  CvScalar color;       //Qt:ColorFont -> cvScalar(blue_component, green_component, red_component[, alpha_component])\n    int         font_face;    //Qt: bool italic         /** =CV_FONT_* */\n    const int*  ascii;      //!< font data and metrics\n    const int*  greek;\n    const int*  cyrillic;\n    float       hscale, vscale;\n    float       shear;      //!< slope coefficient: 0 - normal, >0 - italic\n    int         thickness;    //!< Qt: weight               /** letters thickness */\n    float       dx;       //!< horizontal interval between letters\n    int         line_type;    //!< Qt: PointSize\n}\nCvFont;\n\n/** @brief Initializes font structure (OpenCV 1.x API).\n\nThe function initializes the font structure that can be passed to text rendering functions.\n\n@param font Pointer to the font structure initialized by the function\n@param font_face Font name identifier. See cv::HersheyFonts and corresponding old CV_* identifiers.\n@param hscale Horizontal scale. If equal to 1.0f , the characters have the original width\ndepending on the font type. If equal to 0.5f , the characters are of half the original width.\n@param vscale Vertical scale. If equal to 1.0f , the characters have the original height depending\non the font type. If equal to 0.5f , the characters are of half the original height.\n@param shear Approximate tangent of the character slope relative to the vertical line. 
A zero\nvalue means a non-italic font, 1.0f means about a 45 degree slope, etc.\n@param thickness Thickness of the text strokes\n@param line_type Type of the strokes, see line description\n\n@sa cvPutText\n */\nCVAPI(void)  cvInitFont( CvFont* font, int font_face,\n                         double hscale, double vscale,\n                         double shear CV_DEFAULT(0),\n                         int thickness CV_DEFAULT(1),\n                         int line_type CV_DEFAULT(8));\n\nCV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) )\n{\n    CvFont font;\n    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA );\n    return font;\n}\n\n/** @brief Renders text stroke with specified font and color at specified location.\n   CvFont should be initialized with cvInitFont\n@see cvInitFont, cvGetTextSize, cvFont, cv::putText\n*/\nCVAPI(void)  cvPutText( CvArr* img, const char* text, CvPoint org,\n                        const CvFont* font, CvScalar color );\n\n/** @brief Calculates bounding box of text stroke (useful for alignment)\n@see cv::getTextSize\n*/\nCVAPI(void)  cvGetTextSize( const char* text_string, const CvFont* font,\n                            CvSize* text_size, int* baseline );\n\n/** @brief Unpacks color value\n\nif arrtype is CV_8UC?, _color_ is treated as packed color value, otherwise the first channels\n(depending on arrtype) of destination scalar are set to the same value = _color_\n*/\nCVAPI(CvScalar)  cvColorToScalar( double packed_color, int arrtype );\n\n/** @brief Returns the polygon points which make up the given ellipse.\n\nThe ellipse is define by the box of size 'axes' rotated 'angle' around the 'center'. A partial\nsweep of the ellipse arc can be done by spcifying arc_start and arc_end to be something other than\n0 and 360, respectively. The input array 'pts' must be large enough to hold the result. The total\nnumber of points stored into 'pts' is returned by this function.\n@see cv::ellipse2Poly\n*/\nCVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes,\n                 int angle, int arc_start, int arc_end, CvPoint * pts, int delta );\n\n/** @brief Draws contour outlines or filled interiors on the image\n@see cv::drawContours\n*/\nCVAPI(void)  cvDrawContours( CvArr *img, CvSeq* contour,\n                             CvScalar external_color, CvScalar hole_color,\n                             int max_level, int thickness CV_DEFAULT(1),\n                             int line_type CV_DEFAULT(8),\n                             CvPoint offset CV_DEFAULT(cvPoint(0,0)));\n\n/** @} */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
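  {
    "path": "samples/contours_sketch.cpp",
    "content": "/*\n * Illustrative sketch only: this file is NOT part of the original project or of\n * OpenCV. It was added to show how the legacy contour pipeline declared in\n * imgproc_c.h above is typically driven (cvFindContours -> cvApproxPoly ->\n * cvContourArea / cvContourPerimeter / cvBoundingRect). The path, the synthetic\n * test image and main() are hypothetical; only the cv* calls come from the\n * bundled headers. Untested example code.\n */\n#include <opencv2/imgproc/imgproc_c.h>\n#include <stdio.h>\n\nint main(void)\n{\n    /* Build a binary test image containing two blobs. */\n    IplImage* bin = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 1);\n    cvZero(bin);\n    cvRectangle(bin, cvPoint(40, 40), cvPoint(150, 120), cvScalarAll(255), CV_FILLED, 8, 0);\n    cvCircle(bin, cvPoint(230, 160), 50, cvScalarAll(255), CV_FILLED, 8, 0);\n\n    /* Retrieve the outer contours (cvFindContours modifies its input). */\n    CvMemStorage* storage = cvCreateMemStorage(0);\n    CvSeq* contours = NULL;\n    cvFindContours(bin, storage, &contours, sizeof(CvContour),\n                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));\n\n    /* Walk the top level of the contour tree via the h_next links. */\n    CvSeq* c;\n    for (c = contours; c != NULL; c = c->h_next)\n    {\n        /* Douglas-Peucker simplification; eps = 2% of the perimeter. */\n        CvSeq* poly = cvApproxPoly(c, sizeof(CvContour), storage, CV_POLY_APPROX_DP,\n                                   cvContourPerimeter(c) * 0.02, 0);\n        CvRect box = cvBoundingRect(poly, 0);\n        printf(\"vertices=%d area=%.0f perimeter=%.0f bbox=%dx%d\\n\",\n               poly->total, cvContourArea(poly, CV_WHOLE_SEQ, 0),\n               cvContourPerimeter(poly), box.width, box.height);\n    }\n\n    cvReleaseMemStorage(&storage);   /* frees every contour sequence at once */\n    cvReleaseImage(&bin);\n    return 0;\n}\n"
  },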
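  {
    "path": "samples/convexity_sketch.cpp",
    "content": "/*\n * Illustrative sketch only, not part of the project or of OpenCV: exercises the\n * shape-analysis calls declared in imgproc_c.h above (cvConvexHull2,\n * cvCheckContourConvexity, cvConvexityDefects, cvPointPolygonTest) together\n * with the CvConvexityDefect struct from types_c.h. Path and test geometry are\n * hypothetical; untested example code.\n */\n#include <opencv2/imgproc/imgproc_c.h>\n#include <stdio.h>\n\nint main(void)\n{\n    CvMemStorage* storage = cvCreateMemStorage(0);\n\n    /* Draw an L-shaped (hence non-convex) blob and trace its contour. */\n    IplImage* bin = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 1);\n    cvZero(bin);\n    CvPoint shape[6] = { {60,40}, {260,40}, {260,100}, {140,100}, {140,200}, {60,200} };\n    CvPoint* pts[1] = { shape };\n    int npts[1] = { 6 };\n    cvFillPoly(bin, pts, npts, 1, cvScalarAll(255), 8, 0);\n\n    CvSeq* contour = NULL;\n    cvFindContours(bin, storage, &contour, sizeof(CvContour),\n                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));\n\n    /* Hull as pointers (return_points = 0), the form cvConvexityDefects expects. */\n    CvSeq* hull = cvConvexHull2(contour, storage, CV_CLOCKWISE, 0);\n    printf(\"convex? %d, hull vertices: %d\\n\",\n           cvCheckContourConvexity(contour), hull->total);\n\n    CvSeq* defects = cvConvexityDefects(contour, hull, storage);\n    int i;\n    for (i = 0; i < defects->total; i++)\n    {\n        CvConvexityDefect* d = (CvConvexityDefect*)cvGetSeqElem(defects, i);\n        printf(\"defect depth %.1f at (%d,%d)\\n\",\n               d->depth, d->depth_point->x, d->depth_point->y);\n    }\n\n    /* Signed distance from the contour: positive inside, negative outside. */\n    printf(\"probe (100,80): %.1f\\n\",\n           cvPointPolygonTest(contour, cvPoint2D32f(100, 80), 1));\n\n    cvReleaseImage(&bin);\n    cvReleaseMemStorage(&storage);\n    return 0;\n}\n"
  },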
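  {
    "path": "samples/histogram_sketch.cpp",
    "content": "/*\n * Illustrative sketch only (hypothetical file, untested): shows the lifetime of\n * the CvHistogram API documented in imgproc_c.h above -- cvCreateHist,\n * cvCalcHist, cvGetMinMaxHistValue, cvNormalizeHist, cvCalcBackProject and\n * cvReleaseHist. The synthetic input image stands in for a real frame.\n */\n#include <opencv2/imgproc/imgproc_c.h>\n#include <stdio.h>\n\nint main(void)\n{\n    /* Synthetic 8-bit image: dark background with a bright band. */\n    IplImage* gray = cvCreateImage(cvSize(256, 256), IPL_DEPTH_8U, 1);\n    cvSet(gray, cvScalarAll(64), NULL);\n    cvRectangle(gray, cvPoint(0, 0), cvPoint(255, 95), cvScalarAll(200), CV_FILLED, 8, 0);\n\n    /* One uniform 32-bin dimension over [0,256). */\n    int size = 32;\n    float range[2] = { 0, 256 };\n    float* ranges[1] = { range };\n    CvHistogram* hist = cvCreateHist(1, &size, CV_HIST_ARRAY, ranges, 1);\n\n    cvCalcHist(&gray, hist, 0, NULL);   /* accumulate = 0: bins are reset first */\n\n    float minv = 0, maxv = 0;\n    int min_idx = 0, max_idx = 0;\n    cvGetMinMaxHistValue(hist, &minv, &maxv, &min_idx, &max_idx);\n    printf(\"fullest bin: %d (%.0f pixels)\\n\", max_idx, maxv);\n\n    /* Scale the bins so the back projection fits an 8-bit destination. */\n    cvNormalizeHist(hist, 255);\n    IplImage* backproj = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);\n    cvCalcBackProject(&gray, backproj, hist);\n\n    cvReleaseImage(&backproj);\n    cvReleaseHist(&hist);               /* frees header and bins */\n    cvReleaseImage(&gray);\n    return 0;\n}\n"
  },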
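  {
    "path": "samples/drawing_sketch.cpp",
    "content": "/*\n * Illustrative sketch only (hypothetical file, untested): drives the drawing\n * and text primitives declared in imgproc_c.h above -- cvLine, cvRectangle,\n * cvCircle, cvEllipse, cvInitFont / cvGetTextSize / cvPutText -- with the\n * CV_RGB, CV_FILLED and CV_AA helpers. Remember the canvas channel order is\n * BGR; CV_RGB handles the swap.\n */\n#include <opencv2/imgproc/imgproc_c.h>\n\nint main(void)\n{\n    IplImage* canvas = cvCreateImage(cvSize(480, 320), IPL_DEPTH_8U, 3);\n    cvZero(canvas);\n\n    cvLine(canvas, cvPoint(20, 20), cvPoint(460, 20), CV_RGB(255, 255, 0), 2, CV_AA, 0);\n    cvRectangle(canvas, cvPoint(40, 60), cvPoint(200, 160), CV_RGB(0, 255, 0), 3, 8, 0);\n    /* thickness = CV_FILLED draws the interior instead of the outline. */\n    cvCircle(canvas, cvPoint(320, 110), 50, CV_RGB(255, 0, 0), CV_FILLED, CV_AA, 0);\n    /* A 270-degree elliptic arc, the whole figure rotated by 30 degrees. */\n    cvEllipse(canvas, cvPoint(240, 240), cvSize(120, 40), 30, 0, 270,\n              CV_RGB(0, 128, 255), 2, CV_AA, 0);\n\n    /* Center a caption by measuring its extent first. */\n    CvFont font;\n    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2, CV_AA);\n    CvSize text_size;\n    int baseline = 0;\n    cvGetTextSize(\"drawing demo\", &font, &text_size, &baseline);\n    cvPutText(canvas, \"drawing demo\",\n              cvPoint((480 - text_size.width) / 2, 310 - baseline),\n              &font, CV_RGB(255, 255, 255));\n\n    cvReleaseImage(&canvas);\n    return 0;\n}\n"
  },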
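  {
    "path": "samples/features_sketch.cpp",
    "content": "/*\n * Illustrative sketch only (hypothetical file, untested): chains the feature\n * detectors declared in imgproc_c.h above -- cvCanny, cvHoughLines2 in\n * probabilistic mode, and cvGoodFeaturesToTrack refined by cvFindCornerSubPix\n * -- on a synthetic scene. cvTermCriteria / CV_TERMCRIT_* are assumed from the\n * bundled core headers.\n */\n#include <opencv2/imgproc/imgproc_c.h>\n#include <stdio.h>\n\nint main(void)\n{\n    /* Synthetic scene: one bright rectangle gives 4 edges and 4 corners. */\n    IplImage* gray = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 1);\n    cvZero(gray);\n    cvRectangle(gray, cvPoint(60, 50), cvPoint(250, 190), cvScalarAll(255), CV_FILLED, 8, 0);\n\n    IplImage* edges = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);\n    cvCanny(gray, edges, 50, 150, 3);\n\n    /* Probabilistic Hough: each sequence element is a 2-point segment. */\n    CvMemStorage* storage = cvCreateMemStorage(0);\n    CvSeq* lines = cvHoughLines2(edges, storage, CV_HOUGH_PROBABILISTIC,\n                                 1, CV_PI / 180, 40, 30, 5, 0, CV_PI);\n    int i;\n    for (i = 0; i < lines->total; i++)\n    {\n        CvPoint* seg = (CvPoint*)cvGetSeqElem(lines, i);\n        printf(\"segment (%d,%d)-(%d,%d)\\n\", seg[0].x, seg[0].y, seg[1].x, seg[1].y);\n    }\n\n    /* Shi-Tomasi corners (use_harris = 0), then sub-pixel refinement. */\n    IplImage* eig  = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);\n    IplImage* temp = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);\n    CvPoint2D32f corners[16];\n    int corner_count = 16;\n    cvGoodFeaturesToTrack(gray, eig, temp, corners, &corner_count,\n                          0.05, 10, NULL, 3, 0, 0.04);\n    cvFindCornerSubPix(gray, corners, corner_count, cvSize(5, 5), cvSize(-1, -1),\n                       cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));\n    printf(\"%d corners found\\n\", corner_count);\n\n    cvReleaseImage(&temp);\n    cvReleaseImage(&eig);\n    cvReleaseMemStorage(&storage);\n    cvReleaseImage(&edges);\n    cvReleaseImage(&gray);\n    return 0;\n}\n"
  },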
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgproc/types_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_IMGPROC_TYPES_C_H__\n#define __OPENCV_IMGPROC_TYPES_C_H__\n\n#include \"opencv2/core/core_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/** @addtogroup imgproc_c\n  @{\n*/\n\n/** Connected component structure */\ntypedef struct CvConnectedComp\n{\n    double area;    /**<area of the connected component  */\n    CvScalar value; /**<average color of the connected component */\n    CvRect rect;    /**<ROI of the component  */\n    CvSeq* contour; /**<optional component boundary\n                      (the contour might have child contours corresponding to the holes)*/\n}\nCvConnectedComp;\n\n/** Image smooth methods */\nenum SmoothMethod_c\n{\n    /** linear convolution with \\f$\\texttt{size1}\\times\\texttt{size2}\\f$ box kernel (all 1's). 
If\n    you want to smooth different pixels with different-size box kernels, you can use the integral\n    image that is computed using integral */\n    CV_BLUR_NO_SCALE =0,\n    /** linear convolution with \\f$\\texttt{size1}\\times\\texttt{size2}\\f$ box kernel (all\n    1's) with subsequent scaling by \\f$1/(\\texttt{size1}\\cdot\\texttt{size2})\\f$ */\n    CV_BLUR  =1,\n    /** linear convolution with a \\f$\\texttt{size1}\\times\\texttt{size2}\\f$ Gaussian kernel */\n    CV_GAUSSIAN  =2,\n    /** median filter with a \\f$\\texttt{size1}\\times\\texttt{size1}\\f$ square aperture */\n    CV_MEDIAN =3,\n    /** bilateral filter with a \\f$\\texttt{size1}\\times\\texttt{size1}\\f$ square aperture, color\n    sigma= sigma1 and spatial sigma= sigma2. If size1=0, the aperture square side is set to\n    cvRound(sigma2\\*1.5)\\*2+1. See cv::bilateralFilter */\n    CV_BILATERAL =4\n};\n\n/** Filters used in pyramid decomposition */\nenum\n{\n    CV_GAUSSIAN_5x5 = 7\n};\n\n/** Special filters */\nenum\n{\n    CV_SCHARR =-1,\n    CV_MAX_SOBEL_KSIZE =7\n};\n\n/** Constants for color conversion */\nenum\n{\n    CV_BGR2BGRA    =0,\n    CV_RGB2RGBA    =CV_BGR2BGRA,\n\n    CV_BGRA2BGR    =1,\n    CV_RGBA2RGB    =CV_BGRA2BGR,\n\n    CV_BGR2RGBA    =2,\n    CV_RGB2BGRA    =CV_BGR2RGBA,\n\n    CV_RGBA2BGR    =3,\n    CV_BGRA2RGB    =CV_RGBA2BGR,\n\n    CV_BGR2RGB     =4,\n    CV_RGB2BGR     =CV_BGR2RGB,\n\n    CV_BGRA2RGBA   =5,\n    CV_RGBA2BGRA   =CV_BGRA2RGBA,\n\n    CV_BGR2GRAY    =6,\n    CV_RGB2GRAY    =7,\n    CV_GRAY2BGR    =8,\n    CV_GRAY2RGB    =CV_GRAY2BGR,\n    CV_GRAY2BGRA   =9,\n    CV_GRAY2RGBA   =CV_GRAY2BGRA,\n    CV_BGRA2GRAY   =10,\n    CV_RGBA2GRAY   =11,\n\n    CV_BGR2BGR565  =12,\n    CV_RGB2BGR565  =13,\n    CV_BGR5652BGR  =14,\n    CV_BGR5652RGB  =15,\n    CV_BGRA2BGR565 =16,\n    CV_RGBA2BGR565 =17,\n    CV_BGR5652BGRA =18,\n    CV_BGR5652RGBA =19,\n\n    CV_GRAY2BGR565 =20,\n    CV_BGR5652GRAY =21,\n\n    CV_BGR2BGR555  =22,\n    CV_RGB2BGR555  =23,\n    CV_BGR5552BGR  =24,\n    CV_BGR5552RGB  =25,\n    CV_BGRA2BGR555 =26,\n    CV_RGBA2BGR555 =27,\n    CV_BGR5552BGRA =28,\n    CV_BGR5552RGBA =29,\n\n    CV_GRAY2BGR555 =30,\n    CV_BGR5552GRAY =31,\n\n    CV_BGR2XYZ     =32,\n    CV_RGB2XYZ     =33,\n    CV_XYZ2BGR     =34,\n    CV_XYZ2RGB     =35,\n\n    CV_BGR2YCrCb   =36,\n    CV_RGB2YCrCb   =37,\n    CV_YCrCb2BGR   =38,\n    CV_YCrCb2RGB   =39,\n\n    CV_BGR2HSV     =40,\n    CV_RGB2HSV     =41,\n\n    CV_BGR2Lab     =44,\n    CV_RGB2Lab     =45,\n\n    CV_BayerBG2BGR =46,\n    CV_BayerGB2BGR =47,\n    CV_BayerRG2BGR =48,\n    CV_BayerGR2BGR =49,\n\n    CV_BayerBG2RGB =CV_BayerRG2BGR,\n    CV_BayerGB2RGB =CV_BayerGR2BGR,\n    CV_BayerRG2RGB =CV_BayerBG2BGR,\n    CV_BayerGR2RGB =CV_BayerGB2BGR,\n\n    CV_BGR2Luv     =50,\n    CV_RGB2Luv     =51,\n    CV_BGR2HLS     =52,\n    CV_RGB2HLS     =53,\n\n    CV_HSV2BGR     =54,\n    CV_HSV2RGB     =55,\n\n    CV_Lab2BGR     =56,\n    CV_Lab2RGB     =57,\n    CV_Luv2BGR     =58,\n    CV_Luv2RGB     =59,\n    CV_HLS2BGR     =60,\n    CV_HLS2RGB     =61,\n\n    CV_BayerBG2BGR_VNG =62,\n    CV_BayerGB2BGR_VNG =63,\n    CV_BayerRG2BGR_VNG =64,\n    CV_BayerGR2BGR_VNG =65,\n\n    CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG,\n    CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG,\n    CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG,\n    CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG,\n\n    CV_BGR2HSV_FULL = 66,\n    CV_RGB2HSV_FULL = 67,\n    CV_BGR2HLS_FULL = 68,\n    CV_RGB2HLS_FULL = 69,\n\n    CV_HSV2BGR_FULL = 70,\n    CV_HSV2RGB_FULL = 71,\n    CV_HLS2BGR_FULL = 
72,\n    CV_HLS2RGB_FULL = 73,\n\n    CV_LBGR2Lab     = 74,\n    CV_LRGB2Lab     = 75,\n    CV_LBGR2Luv     = 76,\n    CV_LRGB2Luv     = 77,\n\n    CV_Lab2LBGR     = 78,\n    CV_Lab2LRGB     = 79,\n    CV_Luv2LBGR     = 80,\n    CV_Luv2LRGB     = 81,\n\n    CV_BGR2YUV      = 82,\n    CV_RGB2YUV      = 83,\n    CV_YUV2BGR      = 84,\n    CV_YUV2RGB      = 85,\n\n    CV_BayerBG2GRAY = 86,\n    CV_BayerGB2GRAY = 87,\n    CV_BayerRG2GRAY = 88,\n    CV_BayerGR2GRAY = 89,\n\n    //YUV 4:2:0 formats family\n    CV_YUV2RGB_NV12 = 90,\n    CV_YUV2BGR_NV12 = 91,\n    CV_YUV2RGB_NV21 = 92,\n    CV_YUV2BGR_NV21 = 93,\n    CV_YUV420sp2RGB = CV_YUV2RGB_NV21,\n    CV_YUV420sp2BGR = CV_YUV2BGR_NV21,\n\n    CV_YUV2RGBA_NV12 = 94,\n    CV_YUV2BGRA_NV12 = 95,\n    CV_YUV2RGBA_NV21 = 96,\n    CV_YUV2BGRA_NV21 = 97,\n    CV_YUV420sp2RGBA = CV_YUV2RGBA_NV21,\n    CV_YUV420sp2BGRA = CV_YUV2BGRA_NV21,\n\n    CV_YUV2RGB_YV12 = 98,\n    CV_YUV2BGR_YV12 = 99,\n    CV_YUV2RGB_IYUV = 100,\n    CV_YUV2BGR_IYUV = 101,\n    CV_YUV2RGB_I420 = CV_YUV2RGB_IYUV,\n    CV_YUV2BGR_I420 = CV_YUV2BGR_IYUV,\n    CV_YUV420p2RGB = CV_YUV2RGB_YV12,\n    CV_YUV420p2BGR = CV_YUV2BGR_YV12,\n\n    CV_YUV2RGBA_YV12 = 102,\n    CV_YUV2BGRA_YV12 = 103,\n    CV_YUV2RGBA_IYUV = 104,\n    CV_YUV2BGRA_IYUV = 105,\n    CV_YUV2RGBA_I420 = CV_YUV2RGBA_IYUV,\n    CV_YUV2BGRA_I420 = CV_YUV2BGRA_IYUV,\n    CV_YUV420p2RGBA = CV_YUV2RGBA_YV12,\n    CV_YUV420p2BGRA = CV_YUV2BGRA_YV12,\n\n    CV_YUV2GRAY_420 = 106,\n    CV_YUV2GRAY_NV21 = CV_YUV2GRAY_420,\n    CV_YUV2GRAY_NV12 = CV_YUV2GRAY_420,\n    CV_YUV2GRAY_YV12 = CV_YUV2GRAY_420,\n    CV_YUV2GRAY_IYUV = CV_YUV2GRAY_420,\n    CV_YUV2GRAY_I420 = CV_YUV2GRAY_420,\n    CV_YUV420sp2GRAY = CV_YUV2GRAY_420,\n    CV_YUV420p2GRAY = CV_YUV2GRAY_420,\n\n    //YUV 4:2:2 formats family\n    CV_YUV2RGB_UYVY = 107,\n    CV_YUV2BGR_UYVY = 108,\n    //CV_YUV2RGB_VYUY = 109,\n    //CV_YUV2BGR_VYUY = 110,\n    CV_YUV2RGB_Y422 = CV_YUV2RGB_UYVY,\n    CV_YUV2BGR_Y422 = CV_YUV2BGR_UYVY,\n    CV_YUV2RGB_UYNV = CV_YUV2RGB_UYVY,\n    CV_YUV2BGR_UYNV = CV_YUV2BGR_UYVY,\n\n    CV_YUV2RGBA_UYVY = 111,\n    CV_YUV2BGRA_UYVY = 112,\n    //CV_YUV2RGBA_VYUY = 113,\n    //CV_YUV2BGRA_VYUY = 114,\n    CV_YUV2RGBA_Y422 = CV_YUV2RGBA_UYVY,\n    CV_YUV2BGRA_Y422 = CV_YUV2BGRA_UYVY,\n    CV_YUV2RGBA_UYNV = CV_YUV2RGBA_UYVY,\n    CV_YUV2BGRA_UYNV = CV_YUV2BGRA_UYVY,\n\n    CV_YUV2RGB_YUY2 = 115,\n    CV_YUV2BGR_YUY2 = 116,\n    CV_YUV2RGB_YVYU = 117,\n    CV_YUV2BGR_YVYU = 118,\n    CV_YUV2RGB_YUYV = CV_YUV2RGB_YUY2,\n    CV_YUV2BGR_YUYV = CV_YUV2BGR_YUY2,\n    CV_YUV2RGB_YUNV = CV_YUV2RGB_YUY2,\n    CV_YUV2BGR_YUNV = CV_YUV2BGR_YUY2,\n\n    CV_YUV2RGBA_YUY2 = 119,\n    CV_YUV2BGRA_YUY2 = 120,\n    CV_YUV2RGBA_YVYU = 121,\n    CV_YUV2BGRA_YVYU = 122,\n    CV_YUV2RGBA_YUYV = CV_YUV2RGBA_YUY2,\n    CV_YUV2BGRA_YUYV = CV_YUV2BGRA_YUY2,\n    CV_YUV2RGBA_YUNV = CV_YUV2RGBA_YUY2,\n    CV_YUV2BGRA_YUNV = CV_YUV2BGRA_YUY2,\n\n    CV_YUV2GRAY_UYVY = 123,\n    CV_YUV2GRAY_YUY2 = 124,\n    //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY,\n    CV_YUV2GRAY_Y422 = CV_YUV2GRAY_UYVY,\n    CV_YUV2GRAY_UYNV = CV_YUV2GRAY_UYVY,\n    CV_YUV2GRAY_YVYU = CV_YUV2GRAY_YUY2,\n    CV_YUV2GRAY_YUYV = CV_YUV2GRAY_YUY2,\n    CV_YUV2GRAY_YUNV = CV_YUV2GRAY_YUY2,\n\n    // alpha premultiplication\n    CV_RGBA2mRGBA = 125,\n    CV_mRGBA2RGBA = 126,\n\n    CV_RGB2YUV_I420 = 127,\n    CV_BGR2YUV_I420 = 128,\n    CV_RGB2YUV_IYUV = CV_RGB2YUV_I420,\n    CV_BGR2YUV_IYUV = CV_BGR2YUV_I420,\n\n    CV_RGBA2YUV_I420 = 129,\n    CV_BGRA2YUV_I420 = 130,\n    CV_RGBA2YUV_IYUV = 
CV_RGBA2YUV_I420,\n    CV_BGRA2YUV_IYUV = CV_BGRA2YUV_I420,\n    CV_RGB2YUV_YV12  = 131,\n    CV_BGR2YUV_YV12  = 132,\n    CV_RGBA2YUV_YV12 = 133,\n    CV_BGRA2YUV_YV12 = 134,\n\n    // Edge-Aware Demosaicing\n    CV_BayerBG2BGR_EA = 135,\n    CV_BayerGB2BGR_EA = 136,\n    CV_BayerRG2BGR_EA = 137,\n    CV_BayerGR2BGR_EA = 138,\n\n    CV_BayerBG2RGB_EA = CV_BayerRG2BGR_EA,\n    CV_BayerGB2RGB_EA = CV_BayerGR2BGR_EA,\n    CV_BayerRG2RGB_EA = CV_BayerBG2BGR_EA,\n    CV_BayerGR2RGB_EA = CV_BayerGB2BGR_EA,\n\n    CV_COLORCVT_MAX  = 139\n};\n\n\n/** Sub-pixel interpolation methods */\nenum\n{\n    CV_INTER_NN        =0,\n    CV_INTER_LINEAR    =1,\n    CV_INTER_CUBIC     =2,\n    CV_INTER_AREA      =3,\n    CV_INTER_LANCZOS4  =4\n};\n\n/** ... and other image warping flags */\nenum\n{\n    CV_WARP_FILL_OUTLIERS =8,\n    CV_WARP_INVERSE_MAP  =16\n};\n\n/** Shapes of a structuring element for morphological operations\n@see cv::MorphShapes, cv::getStructuringElement\n*/\nenum MorphShapes_c\n{\n    CV_SHAPE_RECT      =0,\n    CV_SHAPE_CROSS     =1,\n    CV_SHAPE_ELLIPSE   =2,\n    CV_SHAPE_CUSTOM    =100 //!< custom structuring element\n};\n\n/** Morphological operations */\nenum\n{\n    CV_MOP_ERODE        =0,\n    CV_MOP_DILATE       =1,\n    CV_MOP_OPEN         =2,\n    CV_MOP_CLOSE        =3,\n    CV_MOP_GRADIENT     =4,\n    CV_MOP_TOPHAT       =5,\n    CV_MOP_BLACKHAT     =6\n};\n\n/** Spatial and central moments */\ntypedef struct CvMoments\n{\n    double  m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /**< spatial moments */\n    double  mu20, mu11, mu02, mu30, mu21, mu12, mu03; /**< central moments */\n    double  inv_sqrt_m00; /**< m00 != 0 ? 1/sqrt(m00) : 0 */\n\n#ifdef __cplusplus\n    CvMoments(){}\n    CvMoments(const cv::Moments& m)\n    {\n        m00 = m.m00; m10 = m.m10; m01 = m.m01;\n        m20 = m.m20; m11 = m.m11; m02 = m.m02;\n        m30 = m.m30; m21 = m.m21; m12 = m.m12; m03 = m.m03;\n        mu20 = m.mu20; mu11 = m.mu11; mu02 = m.mu02;\n        mu30 = m.mu30; mu21 = m.mu21; mu12 = m.mu12; mu03 = m.mu03;\n        double am00 = std::abs(m.m00);\n        inv_sqrt_m00 = am00 > DBL_EPSILON ? 
1./std::sqrt(am00) : 0;\n    }\n    operator cv::Moments() const\n    {\n        return cv::Moments(m00, m10, m01, m20, m11, m02, m30, m21, m12, m03);\n    }\n#endif\n}\nCvMoments;\n\n/** Hu invariants */\ntypedef struct CvHuMoments\n{\n    double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /**< Hu invariants */\n}\nCvHuMoments;\n\n/** Template matching methods */\nenum\n{\n    CV_TM_SQDIFF        =0,\n    CV_TM_SQDIFF_NORMED =1,\n    CV_TM_CCORR         =2,\n    CV_TM_CCORR_NORMED  =3,\n    CV_TM_CCOEFF        =4,\n    CV_TM_CCOEFF_NORMED =5\n};\n\ntypedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param );\n\n/** Contour retrieval modes */\nenum\n{\n    CV_RETR_EXTERNAL=0,\n    CV_RETR_LIST=1,\n    CV_RETR_CCOMP=2,\n    CV_RETR_TREE=3,\n    CV_RETR_FLOODFILL=4\n};\n\n/** Contour approximation methods */\nenum\n{\n    CV_CHAIN_CODE=0,\n    CV_CHAIN_APPROX_NONE=1,\n    CV_CHAIN_APPROX_SIMPLE=2,\n    CV_CHAIN_APPROX_TC89_L1=3,\n    CV_CHAIN_APPROX_TC89_KCOS=4,\n    CV_LINK_RUNS=5\n};\n\n/*\nInternal structure that is used for sequential retrieving contours from the image.\nIt supports both hierarchical and plane variants of Suzuki algorithm.\n*/\ntypedef struct _CvContourScanner* CvContourScanner;\n\n/** Freeman chain reader state */\ntypedef struct CvChainPtReader\n{\n    CV_SEQ_READER_FIELDS()\n    char      code;\n    CvPoint   pt;\n    schar     deltas[8][2];\n}\nCvChainPtReader;\n\n/** initializes 8-element array for fast access to 3x3 neighborhood of a pixel */\n#define  CV_INIT_3X3_DELTAS( deltas, step, nch )            \\\n    ((deltas)[0] =  (nch),  (deltas)[1] = -(step) + (nch),  \\\n     (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch),  \\\n     (deltas)[4] = -(nch),  (deltas)[5] =  (step) - (nch),  \\\n     (deltas)[6] =  (step), (deltas)[7] =  (step) + (nch))\n\n\n/** Contour approximation algorithms */\nenum\n{\n    CV_POLY_APPROX_DP = 0\n};\n\n/** @brief Shape matching methods\n\n\\f$A\\f$ denotes object1,\\f$B\\f$ denotes object2\n\n\\f$\\begin{array}{l} m^A_i =  \\mathrm{sign} (h^A_i)  \\cdot \\log{h^A_i} \\\\ m^B_i =  \\mathrm{sign} (h^B_i)  \\cdot \\log{h^B_i} \\end{array}\\f$\n\nand \\f$h^A_i, h^B_i\\f$ are the Hu moments of \\f$A\\f$ and \\f$B\\f$ , respectively.\n*/\nenum ShapeMatchModes\n{\n    CV_CONTOURS_MATCH_I1  =1, //!< \\f[I_1(A,B) =  \\sum _{i=1...7}  \\left |  \\frac{1}{m^A_i} -  \\frac{1}{m^B_i} \\right |\\f]\n    CV_CONTOURS_MATCH_I2  =2, //!< \\f[I_2(A,B) =  \\sum _{i=1...7}  \\left | m^A_i - m^B_i  \\right |\\f]\n    CV_CONTOURS_MATCH_I3  =3  //!< \\f[I_3(A,B) =  \\max _{i=1...7}  \\frac{ \\left| m^A_i - m^B_i \\right| }{ \\left| m^A_i \\right| }\\f]\n};\n\n/** Shape orientation */\nenum\n{\n    CV_CLOCKWISE         =1,\n    CV_COUNTER_CLOCKWISE =2\n};\n\n\n/** Convexity defect */\ntypedef struct CvConvexityDefect\n{\n    CvPoint* start; /**< point of the contour where the defect begins */\n    CvPoint* end; /**< point of the contour where the defect ends */\n    CvPoint* depth_point; /**< the farthest from the convex hull point within the defect */\n    float depth; /**< distance between the farthest point and the convex hull */\n} CvConvexityDefect;\n\n\n/** Histogram comparison methods */\nenum\n{\n    CV_COMP_CORREL        =0,\n    CV_COMP_CHISQR        =1,\n    CV_COMP_INTERSECT     =2,\n    CV_COMP_BHATTACHARYYA =3,\n    CV_COMP_HELLINGER     =CV_COMP_BHATTACHARYYA,\n    CV_COMP_CHISQR_ALT    =4,\n    CV_COMP_KL_DIV        =5\n};\n\n/** Mask size for distance transform */\nenum\n{\n    CV_DIST_MASK_3   =3,\n    
CV_DIST_MASK_5   =5,\n    CV_DIST_MASK_PRECISE =0\n};\n\n/** Content of output label array: connected components or pixels */\nenum\n{\n  CV_DIST_LABEL_CCOMP = 0,\n  CV_DIST_LABEL_PIXEL = 1\n};\n\n/** Distance types for Distance Transform and M-estimators */\nenum\n{\n    CV_DIST_USER    =-1,  /**< User defined distance */\n    CV_DIST_L1      =1,   /**< distance = |x1-x2| + |y1-y2| */\n    CV_DIST_L2      =2,   /**< the simple euclidean distance */\n    CV_DIST_C       =3,   /**< distance = max(|x1-x2|,|y1-y2|) */\n    CV_DIST_L12     =4,   /**< L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */\n    CV_DIST_FAIR    =5,   /**< distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */\n    CV_DIST_WELSCH  =6,   /**< distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */\n    CV_DIST_HUBER   =7    /**< distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */\n};\n\n\n/** Threshold types */\nenum\n{\n    CV_THRESH_BINARY      =0,  /**< value = value > threshold ? max_value : 0       */\n    CV_THRESH_BINARY_INV  =1,  /**< value = value > threshold ? 0 : max_value       */\n    CV_THRESH_TRUNC       =2,  /**< value = value > threshold ? threshold : value   */\n    CV_THRESH_TOZERO      =3,  /**< value = value > threshold ? value : 0           */\n    CV_THRESH_TOZERO_INV  =4,  /**< value = value > threshold ? 0 : value           */\n    CV_THRESH_MASK        =7,\n    CV_THRESH_OTSU        =8, /**< use Otsu algorithm to choose the optimal threshold value;\n                                 combine the flag with one of the above CV_THRESH_* values */\n    CV_THRESH_TRIANGLE    =16  /**< use Triangle algorithm to choose the optimal threshold value;\n                                 combine the flag with one of the above CV_THRESH_* values, but not\n                                 with CV_THRESH_OTSU */\n};\n\n/** Adaptive threshold methods */\nenum\n{\n    CV_ADAPTIVE_THRESH_MEAN_C  =0,\n    CV_ADAPTIVE_THRESH_GAUSSIAN_C  =1\n};\n\n/** FloodFill flags */\nenum\n{\n    CV_FLOODFILL_FIXED_RANGE =(1 << 16),\n    CV_FLOODFILL_MASK_ONLY   =(1 << 17)\n};\n\n\n/** Canny edge detector flags */\nenum\n{\n    CV_CANNY_L2_GRADIENT  =(1 << 31)\n};\n\n/** Variants of a Hough transform */\nenum\n{\n    CV_HOUGH_STANDARD =0,\n    CV_HOUGH_PROBABILISTIC =1,\n    CV_HOUGH_MULTI_SCALE =2,\n    CV_HOUGH_GRADIENT =3\n};\n\n\n/* Fast search data structures  */\nstruct CvFeatureTree;\nstruct CvLSH;\nstruct CvLSHOperations;\n\n/** @} */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
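  {
    "path": "samples/threshold_sketch.cpp",
    "content": "/*\n * Illustrative sketch only (hypothetical file, untested): puts the constants\n * defined in types_c.h above to work through the matching imgproc_c.h calls --\n * CV_THRESH_BINARY | CV_THRESH_OTSU with cvThreshold,\n * CV_ADAPTIVE_THRESH_GAUSSIAN_C with cvAdaptiveThreshold, CV_DIST_L2 /\n * CV_DIST_MASK_PRECISE with cvDistTransform, and CV_FLOODFILL_FIXED_RANGE\n * with cvFloodFill.\n */\n#include <opencv2/imgproc/imgproc_c.h>\n#include <stdio.h>\n\nint main(void)\n{\n    /* Bimodal test image so Otsu has two populations to separate. */\n    IplImage* gray = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 1);\n    cvSet(gray, cvScalarAll(60), NULL);\n    cvCircle(gray, cvPoint(160, 120), 70, cvScalarAll(190), CV_FILLED, 8, 0);\n\n    /* Global threshold chosen by Otsu; the return value is that threshold. */\n    IplImage* bw = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);\n    double t = cvThreshold(gray, bw, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);\n    printf(\"Otsu picked threshold %.0f\\n\", t);\n\n    /* Local threshold: 11x11 Gaussian-weighted mean minus C = 5. */\n    IplImage* bw2 = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);\n    cvAdaptiveThreshold(gray, bw2, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C,\n                        CV_THRESH_BINARY, 11, 5);\n\n    /* Exact euclidean distance to the nearest zero pixel. */\n    IplImage* dist = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);\n    cvDistTransform(bw, dist, CV_DIST_L2, CV_DIST_MASK_PRECISE,\n                    NULL, NULL, CV_DIST_LABEL_CCOMP);\n\n    /* 4-connected fill from the corner; fixed-range compares to the seed. */\n    CvConnectedComp comp;\n    cvFloodFill(gray, cvPoint(5, 5), cvScalarAll(255),\n                cvScalarAll(10), cvScalarAll(10), &comp,\n                4 | CV_FLOODFILL_FIXED_RANGE, NULL);\n    printf(\"flood filled %.0f pixels\\n\", comp.area);\n\n    cvReleaseImage(&dist);\n    cvReleaseImage(&bw2);\n    cvReleaseImage(&bw);\n    cvReleaseImage(&gray);\n    return 0;\n}\n"
  },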
  {
    "path": "src/3rdparty/opencv/include/opencv2/imgproc.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_IMGPROC_HPP__\n#define __OPENCV_IMGPROC_HPP__\n\n#include \"opencv2/core.hpp\"\n\n/**\n  @defgroup imgproc Image processing\n  @{\n    @defgroup imgproc_filter Image Filtering\n\nFunctions and classes described in this section are used to perform various linear or non-linear\nfiltering operations on 2D images (represented as Mat's). It means that for each pixel location\n\\f$(x,y)\\f$ in the source image (normally, rectangular), its neighborhood is considered and used to\ncompute the response. In case of a linear filter, it is a weighted sum of pixel values. In case of\nmorphological operations, it is the minimum or maximum values, and so on. The computed response is\nstored in the destination image at the same location \\f$(x,y)\\f$. It means that the output image\nwill be of the same size as the input image. Normally, the functions support multi-channel arrays,\nin which case every channel is processed independently. Therefore, the output image will also have\nthe same number of channels as the input one.\n\nAnother common feature of the functions and classes described in this section is that, unlike\nsimple arithmetic functions, they need to extrapolate values of some non-existing pixels. 
For\nexample, if you want to smooth an image using a Gaussian \\f$3 \\times 3\\f$ filter, then, when\nprocessing the left-most pixels in each row, you need pixels to the left of them, that is, outside\nof the image. You can let these pixels be the same as the left-most image pixels (\"replicated\nborder\" extrapolation method), or assume that all the non-existing pixels are zeros (\"constant\nborder\" extrapolation method), and so on. OpenCV enables you to specify the extrapolation method.\nFor details, see cv::BorderTypes\n\n@anchor filter_depths\n### Depth combinations\nInput depth (src.depth()) | Output depth (ddepth)\n--------------------------|----------------------\nCV_8U                     | -1/CV_16S/CV_32F/CV_64F\nCV_16U/CV_16S             | -1/CV_32F/CV_64F\nCV_32F                    | -1/CV_32F/CV_64F\nCV_64F                    | -1/CV_64F\n\n@note when ddepth=-1, the output image will have the same depth as the source.\n\n    @defgroup imgproc_transform Geometric Image Transformations\n\nThe functions in this section perform various geometrical transformations of 2D images. They do not\nchange the image content but deform the pixel grid and map this deformed grid to the destination\nimage. In fact, to avoid sampling artifacts, the mapping is done in the reverse order, from\ndestination to the source. That is, for each pixel \\f$(x, y)\\f$ of the destination image, the\nfunctions compute coordinates of the corresponding \"donor\" pixel in the source image and copy the\npixel value:\n\n\\f[\\texttt{dst} (x,y)= \\texttt{src} (f_x(x,y), f_y(x,y))\\f]\n\nIn case when you specify the forward mapping \\f$\\left<g_x, g_y\\right>: \\texttt{src} \\rightarrow\n\\texttt{dst}\\f$, the OpenCV functions first compute the corresponding inverse mapping\n\\f$\\left<f_x, f_y\\right>: \\texttt{dst} \\rightarrow \\texttt{src}\\f$ and then use the above formula.\n\nThe actual implementations of the geometrical transformations, from the most generic remap and to\nthe simplest and the fastest resize, need to solve two main problems with the above formula:\n\n- Extrapolation of non-existing pixels. Similarly to the filtering functions described in the\nprevious section, for some \\f$(x,y)\\f$, either one of \\f$f_x(x,y)\\f$, or \\f$f_y(x,y)\\f$, or both\nof them may fall outside of the image. In this case, an extrapolation method needs to be used.\nOpenCV provides the same selection of extrapolation methods as in the filtering functions. In\naddition, it provides the method BORDER_TRANSPARENT. This means that the corresponding pixels in\nthe destination image will not be modified at all.\n\n- Interpolation of pixel values. Usually \\f$f_x(x,y)\\f$ and \\f$f_y(x,y)\\f$ are floating-point\nnumbers. This means that \\f$\\left<f_x, f_y\\right>\\f$ can be either an affine or perspective\ntransformation, or radial lens distortion correction, and so on. So, a pixel value at fractional\ncoordinates needs to be retrieved. In the simplest case, the coordinates can be just rounded to the\nnearest integer coordinates and the corresponding pixel can be used. This is called a\nnearest-neighbor interpolation. However, a better result can be achieved by using more\nsophisticated [interpolation methods](http://en.wikipedia.org/wiki/Multivariate_interpolation) ,\nwhere a polynomial function is fit into some neighborhood of the computed pixel \\f$(f_x(x,y),\nf_y(x,y))\\f$, and then the value of the polynomial at \\f$(f_x(x,y), f_y(x,y))\\f$ is taken as the\ninterpolated pixel value. 
In OpenCV, you can choose between several interpolation methods. See\nresize for details.\n\n    @defgroup imgproc_misc Miscellaneous Image Transformations\n    @defgroup imgproc_draw Drawing Functions\n\nDrawing functions work with matrices/images of arbitrary depth. The boundaries of the shapes can be\nrendered with antialiasing (implemented only for 8-bit images for now). All the functions include\nthe parameter color that uses an RGB value (that may be constructed with the Scalar constructor )\nfor color images and brightness for grayscale images. For color images, the channel ordering is\nnormally *Blue, Green, Red*. This is what imshow, imread, and imwrite expect. So, if you form a\ncolor using the Scalar constructor, it should look like:\n\n\\f[\\texttt{Scalar} (blue \\_ component, green \\_ component, red \\_ component[, alpha \\_ component])\\f]\n\nIf you are using your own image rendering and I/O functions, you can use any channel ordering. The\ndrawing functions process each channel independently and do not depend on the channel order or even\non the used color space. The whole image can be converted from BGR to RGB or to a different color\nspace using cvtColor .\n\nIf a drawn figure is partially or completely outside the image, the drawing functions clip it. Also,\nmany drawing functions can handle pixel coordinates specified with sub-pixel accuracy. This means\nthat the coordinates can be passed as fixed-point numbers encoded as integers. The number of\nfractional bits is specified by the shift parameter and the real point coordinates are calculated as\n\\f$\\texttt{Point}(x,y)\\rightarrow\\texttt{Point2f}(x*2^{-shift},y*2^{-shift})\\f$ . This feature is\nespecially effective when rendering antialiased shapes.\n\n@note The functions do not support alpha-transparency when the target image is 4-channel. In this\ncase, the color[3] is simply copied to the repainted pixels. Thus, if you want to paint\nsemi-transparent shapes, you can paint them in a separate buffer and then blend it with the main\nimage.\n\n    @defgroup imgproc_colormap ColorMaps in OpenCV\n\nThe human perception isn't built for observing fine changes in grayscale images. Human eyes are more\nsensitive to observing changes between colors, so you often need to recolor your grayscale images to\nget a clue about them. OpenCV now comes with various colormaps to enhance the visualization in your\ncomputer vision application.\n\nIn OpenCV you only need applyColorMap to apply a colormap on a given image. The following sample\ncode reads the path to an image from command line, applies a Jet colormap on it and shows the\nresult:\n\n@code\n#include <opencv2/core.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/highgui.hpp>\nusing namespace cv;\n\n#include <iostream>\nusing namespace std;\n\nint main(int argc, const char *argv[])\n{\n    // We need an input image. (can be grayscale or color)\n    if (argc < 2)\n    {\n        cerr << \"We need an image to process here. Please run: colorMap [path_to_image]\" << endl;\n        return -1;\n    }\n    Mat img_in = imread(argv[1]);\n    if(img_in.empty())\n    {\n        cerr << \"Sample image (\" << argv[1] << \") is empty. 
Please adjust your path, so it points to a valid input image!\" << endl;\n        return -1;\n    }\n    // Holds the colormap version of the image:\n    Mat img_color;\n    // Apply the colormap:\n    applyColorMap(img_in, img_color, COLORMAP_JET);\n    // Show the result:\n    imshow(\"colorMap\", img_color);\n    waitKey(0);\n    return 0;\n}\n@endcode\n\n@see cv::ColormapTypes\n\n    @defgroup imgproc_hist Histograms\n    @defgroup imgproc_shape Structural Analysis and Shape Descriptors\n    @defgroup imgproc_motion Motion Analysis and Object Tracking\n    @defgroup imgproc_feature Feature Detection\n    @defgroup imgproc_object Object Detection\n    @defgroup imgproc_c C API\n  @}\n*/\n\nnamespace cv\n{\n\n/** @addtogroup imgproc\n@{\n*/\n\n//! @addtogroup imgproc_filter\n//! @{\n\n//! type of morphological operation\nenum MorphTypes{\n    MORPH_ERODE    = 0, //!< see cv::erode\n    MORPH_DILATE   = 1, //!< see cv::dilate\n    MORPH_OPEN     = 2, //!< an opening operation\n                        //!< \\f[\\texttt{dst} = \\mathrm{open} ( \\texttt{src} , \\texttt{element} )= \\mathrm{dilate} ( \\mathrm{erode} ( \\texttt{src} , \\texttt{element} ))\\f]\n    MORPH_CLOSE    = 3, //!< a closing operation\n                        //!< \\f[\\texttt{dst} = \\mathrm{close} ( \\texttt{src} , \\texttt{element} )= \\mathrm{erode} ( \\mathrm{dilate} ( \\texttt{src} , \\texttt{element} ))\\f]\n    MORPH_GRADIENT = 4, //!< a morphological gradient\n                        //!< \\f[\\texttt{dst} = \\mathrm{morph\\_grad} ( \\texttt{src} , \\texttt{element} )= \\mathrm{dilate} ( \\texttt{src} , \\texttt{element} )- \\mathrm{erode} ( \\texttt{src} , \\texttt{element} )\\f]\n    MORPH_TOPHAT   = 5, //!< \"top hat\"\n                        //!< \\f[\\texttt{dst} = \\mathrm{tophat} ( \\texttt{src} , \\texttt{element} )= \\texttt{src} - \\mathrm{open} ( \\texttt{src} , \\texttt{element} )\\f]\n    MORPH_BLACKHAT = 6, //!< \"black hat\"\n                        //!< \\f[\\texttt{dst} = \\mathrm{blackhat} ( \\texttt{src} , \\texttt{element} )= \\mathrm{close} ( \\texttt{src} , \\texttt{element} )- \\texttt{src}\\f]\n    MORPH_HITMISS  = 7  //!< \"hit and miss\"\n                        //!<   .- Only supported for CV_8UC1 binary images. Tutorial can be found in [this page](http://opencv-code.com/tutorials/hit-or-miss-transform-in-opencv/)\n};\n\n//! shape of the structuring element\nenum MorphShapes {\n    MORPH_RECT    = 0, //!< a rectangular structuring element:  \\f[E_{ij}=1\\f]\n    MORPH_CROSS   = 1, //!< a cross-shaped structuring element:\n                       //!< \\f[E_{ij} =  \\fork{1}{if i=\\texttt{anchor.y} or j=\\texttt{anchor.x}}{0}{otherwise}\\f]\n    MORPH_ELLIPSE = 2 //!< an elliptic structuring element, that is, a filled ellipse inscribed\n                      //!< into the rectangle Rect(0, 0, esize.width, 0.esize.height)\n};\n\n//! @} imgproc_filter\n\n//! @addtogroup imgproc_transform\n//! @{\n\n//! interpolation algorithm\nenum InterpolationFlags{\n    /** nearest neighbor interpolation */\n    INTER_NEAREST        = 0,\n    /** bilinear interpolation */\n    INTER_LINEAR         = 1,\n    /** bicubic interpolation */\n    INTER_CUBIC          = 2,\n    /** resampling using pixel area relation. It may be a preferred method for image decimation, as\n    it gives moire'-free results. But when the image is zoomed, it is similar to the INTER_NEAREST\n    method. 
*/\n    INTER_AREA           = 3,\n    /** Lanczos interpolation over 8x8 neighborhood */\n    INTER_LANCZOS4       = 4,\n    /** mask for interpolation codes */\n    INTER_MAX            = 7,\n    /** flag, fills all of the destination image pixels. If some of them correspond to outliers in the\n    source image, they are set to zero */\n    WARP_FILL_OUTLIERS   = 8,\n    /** flag, inverse transformation\n\n    For example, polar transforms:\n    - flag is __not__ set: \\f$dst( \\phi , \\rho ) = src(x,y)\\f$\n    - flag is set: \\f$dst(x,y) = src( \\phi , \\rho )\\f$\n    */\n    WARP_INVERSE_MAP     = 16\n};\n\nenum InterpolationMasks {\n       INTER_BITS      = 5,\n       INTER_BITS2     = INTER_BITS * 2,\n       INTER_TAB_SIZE  = 1 << INTER_BITS,\n       INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE\n     };\n\n//! @} imgproc_transform\n\n//! @addtogroup imgproc_misc\n//! @{\n\n//! Distance types for Distance Transform and M-estimators\n//! @see cv::distanceTransform, cv::fitLine\nenum DistanceTypes {\n    DIST_USER    = -1,  //!< User defined distance\n    DIST_L1      = 1,   //!< distance = |x1-x2| + |y1-y2|\n    DIST_L2      = 2,   //!< the simple Euclidean distance\n    DIST_C       = 3,   //!< distance = max(|x1-x2|,|y1-y2|)\n    DIST_L12     = 4,   //!< L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)\n    DIST_FAIR    = 5,   //!< distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998\n    DIST_WELSCH  = 6,   //!< distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846\n    DIST_HUBER   = 7    //!< distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345\n};\n\n//! Mask size for distance transform\nenum DistanceTransformMasks {\n    DIST_MASK_3       = 3, //!< mask=3\n    DIST_MASK_5       = 5, //!< mask=5\n    DIST_MASK_PRECISE = 0  //!<\n};\n\n//! type of the threshold operation\n//! ![threshold types](pics/threshold.png)\nenum ThresholdTypes {\n    THRESH_BINARY     = 0, //!< \\f[\\texttt{dst} (x,y) =  \\fork{\\texttt{maxval}}{if \\(\\texttt{src}(x,y) > \\texttt{thresh}\\)}{0}{otherwise}\\f]\n    THRESH_BINARY_INV = 1, //!< \\f[\\texttt{dst} (x,y) =  \\fork{0}{if \\(\\texttt{src}(x,y) > \\texttt{thresh}\\)}{\\texttt{maxval}}{otherwise}\\f]\n    THRESH_TRUNC      = 2, //!< \\f[\\texttt{dst} (x,y) =  \\fork{\\texttt{threshold}}{if \\(\\texttt{src}(x,y) > \\texttt{thresh}\\)}{\\texttt{src}(x,y)}{otherwise}\\f]\n    THRESH_TOZERO     = 3, //!< \\f[\\texttt{dst} (x,y) =  \\fork{\\texttt{src}(x,y)}{if \\(\\texttt{src}(x,y) > \\texttt{thresh}\\)}{0}{otherwise}\\f]\n    THRESH_TOZERO_INV = 4, //!< \\f[\\texttt{dst} (x,y) =  \\fork{0}{if \\(\\texttt{src}(x,y) > \\texttt{thresh}\\)}{\\texttt{src}(x,y)}{otherwise}\\f]\n    THRESH_MASK       = 7,\n    THRESH_OTSU       = 8, //!< flag, use Otsu algorithm to choose the optimal threshold value\n    THRESH_TRIANGLE   = 16 //!< flag, use Triangle algorithm to choose the optimal threshold value\n};\n\n//! adaptive threshold algorithm\n//! see cv::adaptiveThreshold\nenum AdaptiveThresholdTypes {\n    /** the threshold value \\f$T(x,y)\\f$ is a mean of the \\f$\\texttt{blockSize} \\times\n    \\texttt{blockSize}\\f$ neighborhood of \\f$(x, y)\\f$ minus C */\n    ADAPTIVE_THRESH_MEAN_C     = 0,\n    /** the threshold value \\f$T(x, y)\\f$ is a weighted sum (cross-correlation with a Gaussian\n    window) of the \\f$\\texttt{blockSize} \\times \\texttt{blockSize}\\f$ neighborhood of \\f$(x, y)\\f$\n    minus C . The default sigma (standard deviation) is used for the specified blockSize . See\n    cv::getGaussianKernel*/\n    ADAPTIVE_THRESH_GAUSSIAN_C = 1\n};\n\n//! 
cv::undistort mode\nenum UndistortTypes {\n       PROJ_SPHERICAL_ORTHO  = 0,\n       PROJ_SPHERICAL_EQRECT = 1\n     };\n\n//! class of the pixel in the GrabCut algorithm\nenum GrabCutClasses {\n    GC_BGD    = 0,  //!< an obvious background pixel\n    GC_FGD    = 1,  //!< an obvious foreground (object) pixel\n    GC_PR_BGD = 2,  //!< a possible background pixel\n    GC_PR_FGD = 3   //!< a possible foreground pixel\n};\n\n//! GrabCut algorithm flags\nenum GrabCutModes {\n    /** The function initializes the state and the mask using the provided rectangle. After that it\n    runs iterCount iterations of the algorithm. */\n    GC_INIT_WITH_RECT  = 0,\n    /** The function initializes the state using the provided mask. Note that GC_INIT_WITH_RECT\n    and GC_INIT_WITH_MASK can be combined. Then, all the pixels outside of the ROI are\n    automatically initialized with GC_BGD .*/\n    GC_INIT_WITH_MASK  = 1,\n    /** The value means that the algorithm should just resume. */\n    GC_EVAL            = 2\n};\n\n//! distanceTransform algorithm flags\nenum DistanceTransformLabelTypes {\n    /** each connected component of zeros in src (as well as all the non-zero pixels closest to the\n    connected component) will be assigned the same label */\n    DIST_LABEL_CCOMP = 0,\n    /** each zero pixel (and all the non-zero pixels closest to it) gets its own label. */\n    DIST_LABEL_PIXEL = 1\n};\n\n//! floodfill algorithm flags\nenum FloodFillFlags {\n    /** If set, the difference between the current pixel and seed pixel is considered. Otherwise,\n    the difference between neighbor pixels is considered (that is, the range is floating). */\n    FLOODFILL_FIXED_RANGE = 1 << 16,\n    /** If set, the function does not change the image ( newVal is ignored), and only fills the\n    mask with the value specified in bits 8-16 of flags as described above. This option only makes\n    sense in function variants that have the mask parameter. */\n    FLOODFILL_MASK_ONLY   = 1 << 17\n};\n\n//! @} imgproc_misc\n\n//! @addtogroup imgproc_shape\n//! @{\n\n//! connected components algorithm output formats\nenum ConnectedComponentsTypes {\n    CC_STAT_LEFT   = 0, //!< The leftmost (x) coordinate which is the inclusive start of the bounding\n                        //!< box in the horizontal direction.\n    CC_STAT_TOP    = 1, //!< The topmost (y) coordinate which is the inclusive start of the bounding\n                        //!< box in the vertical direction.\n    CC_STAT_WIDTH  = 2, //!< The horizontal size of the bounding box\n    CC_STAT_HEIGHT = 3, //!< The vertical size of the bounding box\n    CC_STAT_AREA   = 4, //!< The total area (in pixels) of the connected component\n    CC_STAT_MAX    = 5\n};\n\n//! mode of the contour retrieval algorithm\nenum RetrievalModes {\n    /** retrieves only the extreme outer contours. It sets `hierarchy[i][2]=hierarchy[i][3]=-1` for\n    all the contours. */\n    RETR_EXTERNAL  = 0,\n    /** retrieves all of the contours without establishing any hierarchical relationships. */\n    RETR_LIST      = 1,\n    /** retrieves all of the contours and organizes them into a two-level hierarchy. At the top\n    level, there are external boundaries of the components. At the second level, there are\n    boundaries of the holes. If there is another contour inside a hole of a connected component, it\n    is still put at the top level. 
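A typical use with cv::findContours might look like this (a minimal sketch; `binary` is an\n    illustrative 8-bit single-channel image):\n    @code\n    std::vector<std::vector<Point> > contours;\n    std::vector<Vec4i> hierarchy;\n    findContours(binary, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);\n    @endcode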
*/\n    RETR_CCOMP     = 2,\n    /** retrieves all of the contours and reconstructs a full hierarchy of nested contours.*/\n    RETR_TREE      = 3,\n    RETR_FLOODFILL = 4 //!<\n};\n\n//! the contour approximation algorithm\nenum ContourApproximationModes {\n    /** stores absolutely all the contour points. That is, any 2 subsequent points (x1,y1) and\n    (x2,y2) of the contour will be either horizontal, vertical or diagonal neighbors, that is,\n    max(abs(x1-x2),abs(y2-y1))==1. */\n    CHAIN_APPROX_NONE      = 1,\n    /** compresses horizontal, vertical, and diagonal segments and leaves only their end points.\n    For example, an up-right rectangular contour is encoded with 4 points. */\n    CHAIN_APPROX_SIMPLE    = 2,\n    /** applies one of the flavors of the Teh-Chin chain approximation algorithm @cite TehChin89 */\n    CHAIN_APPROX_TC89_L1   = 3,\n    /** applies one of the flavors of the Teh-Chin chain approximation algorithm @cite TehChin89 */\n    CHAIN_APPROX_TC89_KCOS = 4\n};\n\n//! @} imgproc_shape\n\n//! Variants of a Hough transform\nenum HoughModes {\n\n    /** classical or standard Hough transform. Every line is represented by two floating-point\n    numbers \\f$(\\rho, \\theta)\\f$ , where \\f$\\rho\\f$ is the distance between the point (0,0) and the line,\n    and \\f$\\theta\\f$ is the angle between the x-axis and the normal to the line. Thus, the matrix must\n    be (the created sequence will be) of CV_32FC2 type */\n    HOUGH_STANDARD      = 0,\n    /** probabilistic Hough transform (more efficient when the picture contains a few long\n    linear segments). It returns line segments rather than the whole line. Each segment is\n    represented by starting and ending points, and the matrix must be (the created sequence will\n    be) of the CV_32SC4 type. */\n    HOUGH_PROBABILISTIC = 1,\n    /** multi-scale variant of the classical Hough transform. The lines are encoded the same way as\n    HOUGH_STANDARD. */\n    HOUGH_MULTI_SCALE   = 2,\n    HOUGH_GRADIENT      = 3 //!< basically *21HT*, described in @cite Yuen90\n};\n\n//! Variants of Line Segment %Detector\n//! @ingroup imgproc_feature\nenum LineSegmentDetectorModes {\n    LSD_REFINE_NONE = 0, //!< No refinement applied\n    LSD_REFINE_STD  = 1, //!< Standard refinement is applied, e.g. breaking arches into smaller, straighter line approximations.\n    LSD_REFINE_ADV  = 2  //!< Advanced refinement. Number of false alarms is calculated, lines are\n                         //!< refined through increase of precision, decrement in size, etc.\n};\n\n/** Histogram comparison methods\n  @ingroup imgproc_hist\n*/\nenum HistCompMethods {\n    /** Correlation\n    \\f[d(H_1,H_2) =  \\frac{\\sum_I (H_1(I) - \\bar{H_1}) (H_2(I) - \\bar{H_2})}{\\sqrt{\\sum_I(H_1(I) - \\bar{H_1})^2 \\sum_I(H_2(I) - \\bar{H_2})^2}}\\f]\n    where\n    \\f[\\bar{H_k} =  \\frac{1}{N} \\sum _J H_k(J)\\f]\n    and \\f$N\\f$ is the total number of histogram bins. 
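A comparison of two histograms with this method might look like this (a minimal sketch; `hist1`\n    and `hist2` are illustrative histograms, e.g. computed by cv::calcHist):\n    @code\n    double similarity = compareHist(hist1, hist2, HISTCMP_CORREL);\n    @endcode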
*/\n    HISTCMP_CORREL        = 0,\n    /** Chi-Square\n    \\f[d(H_1,H_2) =  \\sum _I  \\frac{\\left(H_1(I)-H_2(I)\\right)^2}{H_1(I)}\\f] */\n    HISTCMP_CHISQR        = 1,\n    /** Intersection\n    \\f[d(H_1,H_2) =  \\sum _I  \\min (H_1(I), H_2(I))\\f] */\n    HISTCMP_INTERSECT     = 2,\n    /** Bhattacharyya distance\n    (In fact, OpenCV computes Hellinger distance, which is related to Bhattacharyya coefficient.)\n    \\f[d(H_1,H_2) =  \\sqrt{1 - \\frac{1}{\\sqrt{\\bar{H_1} \\bar{H_2} N^2}} \\sum_I \\sqrt{H_1(I) \\cdot H_2(I)}}\\f] */\n    HISTCMP_BHATTACHARYYA = 3,\n    HISTCMP_HELLINGER     = HISTCMP_BHATTACHARYYA, //!< Synonym for HISTCMP_BHATTACHARYYA\n    /** Alternative Chi-Square\n    \\f[d(H_1,H_2) =  2 * \\sum _I  \\frac{\\left(H_1(I)-H_2(I)\\right)^2}{H_1(I)+H_2(I)}\\f]\n    This alternative formula is regularly used for texture comparison. See e.g. @cite Puzicha1997 */\n    HISTCMP_CHISQR_ALT    = 4,\n    /** Kullback-Leibler divergence\n    \\f[d(H_1,H_2) = \\sum _I H_1(I) \\log \\left(\\frac{H_1(I)}{H_2(I)}\\right)\\f] */\n    HISTCMP_KL_DIV        = 5\n};\n\n/** the color conversion code\n@see @ref imgproc_color_conversions\n@ingroup imgproc_misc\n */\nenum ColorConversionCodes {\n    COLOR_BGR2BGRA     = 0, //!< add alpha channel to RGB or BGR image\n    COLOR_RGB2RGBA     = COLOR_BGR2BGRA,\n\n    COLOR_BGRA2BGR     = 1, //!< remove alpha channel from RGB or BGR image\n    COLOR_RGBA2RGB     = COLOR_BGRA2BGR,\n\n    COLOR_BGR2RGBA     = 2, //!< convert between RGB and BGR color spaces (with or without alpha channel)\n    COLOR_RGB2BGRA     = COLOR_BGR2RGBA,\n\n    COLOR_RGBA2BGR     = 3,\n    COLOR_BGRA2RGB     = COLOR_RGBA2BGR,\n\n    COLOR_BGR2RGB      = 4,\n    COLOR_RGB2BGR      = COLOR_BGR2RGB,\n\n    COLOR_BGRA2RGBA    = 5,\n    COLOR_RGBA2BGRA    = COLOR_BGRA2RGBA,\n\n    COLOR_BGR2GRAY     = 6, //!< convert between RGB/BGR and grayscale, @ref color_convert_rgb_gray \"color conversions\"\n    COLOR_RGB2GRAY     = 7,\n    COLOR_GRAY2BGR     = 8,\n    COLOR_GRAY2RGB     = COLOR_GRAY2BGR,\n    COLOR_GRAY2BGRA    = 9,\n    COLOR_GRAY2RGBA    = COLOR_GRAY2BGRA,\n    COLOR_BGRA2GRAY    = 10,\n    COLOR_RGBA2GRAY    = 11,\n\n    COLOR_BGR2BGR565   = 12, //!< convert between RGB/BGR and BGR565 (16-bit images)\n    COLOR_RGB2BGR565   = 13,\n    COLOR_BGR5652BGR   = 14,\n    COLOR_BGR5652RGB   = 15,\n    COLOR_BGRA2BGR565  = 16,\n    COLOR_RGBA2BGR565  = 17,\n    COLOR_BGR5652BGRA  = 18,\n    COLOR_BGR5652RGBA  = 19,\n\n    COLOR_GRAY2BGR565  = 20, //!< convert between grayscale and BGR565 (16-bit images)\n    COLOR_BGR5652GRAY  = 21,\n\n    COLOR_BGR2BGR555   = 22,  //!< convert between RGB/BGR and BGR555 (16-bit images)\n    COLOR_RGB2BGR555   = 23,\n    COLOR_BGR5552BGR   = 24,\n    COLOR_BGR5552RGB   = 25,\n    COLOR_BGRA2BGR555  = 26,\n    COLOR_RGBA2BGR555  = 27,\n    COLOR_BGR5552BGRA  = 28,\n    COLOR_BGR5552RGBA  = 29,\n\n    COLOR_GRAY2BGR555  = 30, //!< convert between grayscale and BGR555 (16-bit images)\n    COLOR_BGR5552GRAY  = 31,\n\n    COLOR_BGR2XYZ      = 32, //!< convert RGB/BGR to CIE XYZ, @ref color_convert_rgb_xyz \"color conversions\"\n    COLOR_RGB2XYZ      = 33,\n    COLOR_XYZ2BGR      = 34,\n    COLOR_XYZ2RGB      = 35,\n\n    COLOR_BGR2YCrCb    = 36, //!< convert RGB/BGR to luma-chroma (aka YCC), @ref color_convert_rgb_ycrcb \"color conversions\"\n    COLOR_RGB2YCrCb    = 37,\n    COLOR_YCrCb2BGR    = 38,\n    COLOR_YCrCb2RGB    = 39,\n\n    COLOR_BGR2HSV      = 40, //!< convert RGB/BGR to HSV (hue saturation value), @ref color_convert_rgb_hsv 
\"color conversions\"\n    COLOR_RGB2HSV      = 41,\n\n    COLOR_BGR2Lab      = 44, //!< convert RGB/BGR to CIE Lab, @ref color_convert_rgb_lab \"color conversions\"\n    COLOR_RGB2Lab      = 45,\n\n    COLOR_BGR2Luv      = 50, //!< convert RGB/BGR to CIE Luv, @ref color_convert_rgb_luv \"color conversions\"\n    COLOR_RGB2Luv      = 51,\n    COLOR_BGR2HLS      = 52, //!< convert RGB/BGR to HLS (hue lightness saturation), @ref color_convert_rgb_hls \"color conversions\"\n    COLOR_RGB2HLS      = 53,\n\n    COLOR_HSV2BGR      = 54, //!< backward conversions to RGB/BGR\n    COLOR_HSV2RGB      = 55,\n\n    COLOR_Lab2BGR      = 56,\n    COLOR_Lab2RGB      = 57,\n    COLOR_Luv2BGR      = 58,\n    COLOR_Luv2RGB      = 59,\n    COLOR_HLS2BGR      = 60,\n    COLOR_HLS2RGB      = 61,\n\n    COLOR_BGR2HSV_FULL = 66, //!<\n    COLOR_RGB2HSV_FULL = 67,\n    COLOR_BGR2HLS_FULL = 68,\n    COLOR_RGB2HLS_FULL = 69,\n\n    COLOR_HSV2BGR_FULL = 70,\n    COLOR_HSV2RGB_FULL = 71,\n    COLOR_HLS2BGR_FULL = 72,\n    COLOR_HLS2RGB_FULL = 73,\n\n    COLOR_LBGR2Lab     = 74,\n    COLOR_LRGB2Lab     = 75,\n    COLOR_LBGR2Luv     = 76,\n    COLOR_LRGB2Luv     = 77,\n\n    COLOR_Lab2LBGR     = 78,\n    COLOR_Lab2LRGB     = 79,\n    COLOR_Luv2LBGR     = 80,\n    COLOR_Luv2LRGB     = 81,\n\n    COLOR_BGR2YUV      = 82, //!< convert between RGB/BGR and YUV\n    COLOR_RGB2YUV      = 83,\n    COLOR_YUV2BGR      = 84,\n    COLOR_YUV2RGB      = 85,\n\n    //! YUV 4:2:0 family to RGB\n    COLOR_YUV2RGB_NV12  = 90,\n    COLOR_YUV2BGR_NV12  = 91,\n    COLOR_YUV2RGB_NV21  = 92,\n    COLOR_YUV2BGR_NV21  = 93,\n    COLOR_YUV420sp2RGB  = COLOR_YUV2RGB_NV21,\n    COLOR_YUV420sp2BGR  = COLOR_YUV2BGR_NV21,\n\n    COLOR_YUV2RGBA_NV12 = 94,\n    COLOR_YUV2BGRA_NV12 = 95,\n    COLOR_YUV2RGBA_NV21 = 96,\n    COLOR_YUV2BGRA_NV21 = 97,\n    COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,\n    COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,\n\n    COLOR_YUV2RGB_YV12  = 98,\n    COLOR_YUV2BGR_YV12  = 99,\n    COLOR_YUV2RGB_IYUV  = 100,\n    COLOR_YUV2BGR_IYUV  = 101,\n    COLOR_YUV2RGB_I420  = COLOR_YUV2RGB_IYUV,\n    COLOR_YUV2BGR_I420  = COLOR_YUV2BGR_IYUV,\n    COLOR_YUV420p2RGB   = COLOR_YUV2RGB_YV12,\n    COLOR_YUV420p2BGR   = COLOR_YUV2BGR_YV12,\n\n    COLOR_YUV2RGBA_YV12 = 102,\n    COLOR_YUV2BGRA_YV12 = 103,\n    COLOR_YUV2RGBA_IYUV = 104,\n    COLOR_YUV2BGRA_IYUV = 105,\n    COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,\n    COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,\n    COLOR_YUV420p2RGBA  = COLOR_YUV2RGBA_YV12,\n    COLOR_YUV420p2BGRA  = COLOR_YUV2BGRA_YV12,\n\n    COLOR_YUV2GRAY_420  = 106,\n    COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,\n    COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,\n    COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,\n    COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,\n    COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,\n    COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,\n    COLOR_YUV420p2GRAY  = COLOR_YUV2GRAY_420,\n\n    //! 
YUV 4:2:2 family to RGB\n    COLOR_YUV2RGB_UYVY = 107,\n    COLOR_YUV2BGR_UYVY = 108,\n    //COLOR_YUV2RGB_VYUY = 109,\n    //COLOR_YUV2BGR_VYUY = 110,\n    COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,\n    COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,\n    COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,\n    COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,\n\n    COLOR_YUV2RGBA_UYVY = 111,\n    COLOR_YUV2BGRA_UYVY = 112,\n    //COLOR_YUV2RGBA_VYUY = 113,\n    //COLOR_YUV2BGRA_VYUY = 114,\n    COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,\n    COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,\n    COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,\n    COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,\n\n    COLOR_YUV2RGB_YUY2 = 115,\n    COLOR_YUV2BGR_YUY2 = 116,\n    COLOR_YUV2RGB_YVYU = 117,\n    COLOR_YUV2BGR_YVYU = 118,\n    COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,\n    COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,\n    COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,\n    COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,\n\n    COLOR_YUV2RGBA_YUY2 = 119,\n    COLOR_YUV2BGRA_YUY2 = 120,\n    COLOR_YUV2RGBA_YVYU = 121,\n    COLOR_YUV2BGRA_YVYU = 122,\n    COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,\n    COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,\n    COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,\n    COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,\n\n    COLOR_YUV2GRAY_UYVY = 123,\n    COLOR_YUV2GRAY_YUY2 = 124,\n    //CV_YUV2GRAY_VYUY    = CV_YUV2GRAY_UYVY,\n    COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,\n    COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,\n    COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,\n    COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,\n    COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,\n\n    //! alpha premultiplication\n    COLOR_RGBA2mRGBA    = 125,\n    COLOR_mRGBA2RGBA    = 126,\n\n    //! RGB to YUV 4:2:0 family\n    COLOR_RGB2YUV_I420  = 127,\n    COLOR_BGR2YUV_I420  = 128,\n    COLOR_RGB2YUV_IYUV  = COLOR_RGB2YUV_I420,\n    COLOR_BGR2YUV_IYUV  = COLOR_BGR2YUV_I420,\n\n    COLOR_RGBA2YUV_I420 = 129,\n    COLOR_BGRA2YUV_I420 = 130,\n    COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,\n    COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,\n    COLOR_RGB2YUV_YV12  = 131,\n    COLOR_BGR2YUV_YV12  = 132,\n    COLOR_RGBA2YUV_YV12 = 133,\n    COLOR_BGRA2YUV_YV12 = 134,\n\n    //! Demosaicing\n    COLOR_BayerBG2BGR = 46,\n    COLOR_BayerGB2BGR = 47,\n    COLOR_BayerRG2BGR = 48,\n    COLOR_BayerGR2BGR = 49,\n\n    COLOR_BayerBG2RGB = COLOR_BayerRG2BGR,\n    COLOR_BayerGB2RGB = COLOR_BayerGR2BGR,\n    COLOR_BayerRG2RGB = COLOR_BayerBG2BGR,\n    COLOR_BayerGR2RGB = COLOR_BayerGB2BGR,\n\n    COLOR_BayerBG2GRAY = 86,\n    COLOR_BayerGB2GRAY = 87,\n    COLOR_BayerRG2GRAY = 88,\n    COLOR_BayerGR2GRAY = 89,\n\n    //! Demosaicing using Variable Number of Gradients\n    COLOR_BayerBG2BGR_VNG = 62,\n    COLOR_BayerGB2BGR_VNG = 63,\n    COLOR_BayerRG2BGR_VNG = 64,\n    COLOR_BayerGR2BGR_VNG = 65,\n\n    COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG,\n    COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG,\n    COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG,\n    COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG,\n\n    //! 
Edge-Aware Demosaicing\n    COLOR_BayerBG2BGR_EA  = 135,\n    COLOR_BayerGB2BGR_EA  = 136,\n    COLOR_BayerRG2BGR_EA  = 137,\n    COLOR_BayerGR2BGR_EA  = 138,\n\n    COLOR_BayerBG2RGB_EA  = COLOR_BayerRG2BGR_EA,\n    COLOR_BayerGB2RGB_EA  = COLOR_BayerGR2BGR_EA,\n    COLOR_BayerRG2RGB_EA  = COLOR_BayerBG2BGR_EA,\n    COLOR_BayerGR2RGB_EA  = COLOR_BayerGB2BGR_EA,\n\n\n    COLOR_COLORCVT_MAX  = 139\n};\n\n/** types of intersection between rectangles\n@ingroup imgproc_shape\n*/\nenum RectanglesIntersectTypes {\n    INTERSECT_NONE = 0, //!< No intersection\n    INTERSECT_PARTIAL  = 1, //!< There is a partial intersection\n    INTERSECT_FULL  = 2 //!< One of the rectangles is fully enclosed in the other\n};\n\n//! finds an arbitrary template in the grayscale image using the Generalized Hough Transform\nclass CV_EXPORTS GeneralizedHough : public Algorithm\n{\npublic:\n    //! set template to search\n    virtual void setTemplate(InputArray templ, Point templCenter = Point(-1, -1)) = 0;\n    virtual void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)) = 0;\n\n    //! find template on image\n    virtual void detect(InputArray image, OutputArray positions, OutputArray votes = noArray()) = 0;\n    virtual void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = noArray()) = 0;\n\n    //! Canny low threshold.\n    virtual void setCannyLowThresh(int cannyLowThresh) = 0;\n    virtual int getCannyLowThresh() const = 0;\n\n    //! Canny high threshold.\n    virtual void setCannyHighThresh(int cannyHighThresh) = 0;\n    virtual int getCannyHighThresh() const = 0;\n\n    //! Minimum distance between the centers of the detected objects.\n    virtual void setMinDist(double minDist) = 0;\n    virtual double getMinDist() const = 0;\n\n    //! Inverse ratio of the accumulator resolution to the image resolution.\n    virtual void setDp(double dp) = 0;\n    virtual double getDp() const = 0;\n\n    //! Maximal size of inner buffers.\n    virtual void setMaxBufferSize(int maxBufferSize) = 0;\n    virtual int getMaxBufferSize() const = 0;\n};\n\n//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.\n//! Detects position only, without translation and rotation\nclass CV_EXPORTS GeneralizedHoughBallard : public GeneralizedHough\n{\npublic:\n    //! R-Table levels.\n    virtual void setLevels(int levels) = 0;\n    virtual int getLevels() const = 0;\n\n    //! The accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected.\n    virtual void setVotesThreshold(int votesThreshold) = 0;\n    virtual int getVotesThreshold() const = 0;\n};\n\n//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.\n//! Detects position, translation and rotation\nclass CV_EXPORTS GeneralizedHoughGuil : public GeneralizedHough\n{\npublic:\n    //! Angle difference in degrees between two points in a feature.\n    virtual void setXi(double xi) = 0;\n    virtual double getXi() const = 0;\n\n    //! Feature table levels.\n    virtual void setLevels(int levels) = 0;\n    virtual int getLevels() const = 0;\n\n    //! Maximal difference between angles that are treated as equal.\n    virtual void setAngleEpsilon(double angleEpsilon) = 0;\n    virtual double getAngleEpsilon() const = 0;\n\n    //! 
Minimal rotation angle to detect in degrees.\n    virtual void setMinAngle(double minAngle) = 0;\n    virtual double getMinAngle() const = 0;\n\n    //! Maximal rotation angle to detect in degrees.\n    virtual void setMaxAngle(double maxAngle) = 0;\n    virtual double getMaxAngle() const = 0;\n\n    //! Angle step in degrees.\n    virtual void setAngleStep(double angleStep) = 0;\n    virtual double getAngleStep() const = 0;\n\n    //! Angle votes threshold.\n    virtual void setAngleThresh(int angleThresh) = 0;\n    virtual int getAngleThresh() const = 0;\n\n    //! Minimal scale to detect.\n    virtual void setMinScale(double minScale) = 0;\n    virtual double getMinScale() const = 0;\n\n    //! Maximal scale to detect.\n    virtual void setMaxScale(double maxScale) = 0;\n    virtual double getMaxScale() const = 0;\n\n    //! Scale step.\n    virtual void setScaleStep(double scaleStep) = 0;\n    virtual double getScaleStep() const = 0;\n\n    //! Scale votes threshold.\n    virtual void setScaleThresh(int scaleThresh) = 0;\n    virtual int getScaleThresh() const = 0;\n\n    //! Position votes threshold.\n    virtual void setPosThresh(int posThresh) = 0;\n    virtual int getPosThresh() const = 0;\n};\n\n\nclass CV_EXPORTS_W CLAHE : public Algorithm\n{\npublic:\n    CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0;\n\n    CV_WRAP virtual void setClipLimit(double clipLimit) = 0;\n    CV_WRAP virtual double getClipLimit() const = 0;\n\n    CV_WRAP virtual void setTilesGridSize(Size tileGridSize) = 0;\n    CV_WRAP virtual Size getTilesGridSize() const = 0;\n\n    CV_WRAP virtual void collectGarbage() = 0;\n};\n\n\nclass CV_EXPORTS_W Subdiv2D\n{\npublic:\n    enum { PTLOC_ERROR        = -2,\n           PTLOC_OUTSIDE_RECT = -1,\n           PTLOC_INSIDE       = 0,\n           PTLOC_VERTEX       = 1,\n           PTLOC_ON_EDGE      = 2\n         };\n\n    enum { NEXT_AROUND_ORG   = 0x00,\n           NEXT_AROUND_DST   = 0x22,\n           PREV_AROUND_ORG   = 0x11,\n           PREV_AROUND_DST   = 0x33,\n           NEXT_AROUND_LEFT  = 0x13,\n           NEXT_AROUND_RIGHT = 0x31,\n           PREV_AROUND_LEFT  = 0x20,\n           PREV_AROUND_RIGHT = 0x02\n         };\n\n    CV_WRAP Subdiv2D();\n    CV_WRAP Subdiv2D(Rect rect);\n    CV_WRAP void initDelaunay(Rect rect);\n\n    CV_WRAP int insert(Point2f pt);\n    CV_WRAP void insert(const std::vector<Point2f>& ptvec);\n    CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex);\n\n    CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt = 0);\n    CV_WRAP void getEdgeList(CV_OUT std::vector<Vec4f>& edgeList) const;\n    CV_WRAP void getTriangleList(CV_OUT std::vector<Vec6f>& triangleList) const;\n    CV_WRAP void getVoronoiFacetList(const std::vector<int>& idx, CV_OUT std::vector<std::vector<Point2f> >& facetList,\n                                     CV_OUT std::vector<Point2f>& facetCenters);\n\n    CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge = 0) const;\n\n    CV_WRAP int getEdge( int edge, int nextEdgeType ) const;\n    CV_WRAP int nextEdge(int edge) const;\n    CV_WRAP int rotateEdge(int edge, int rotate) const;\n    CV_WRAP int symEdge(int edge) const;\n    CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt = 0) const;\n    CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt = 0) const;\n\nprotected:\n    int newEdge();\n    void deleteEdge(int edge);\n    int newPoint(Point2f pt, bool isvirtual, int firstEdge = 0);\n    void deletePoint(int vtx);\n    void setEdgePoints( int edge, int orgPt, 
int dstPt );\n    void splice( int edgeA, int edgeB );\n    int connectEdges( int edgeA, int edgeB );\n    void swapEdges( int edge );\n    int isRightOf(Point2f pt, int edge) const;\n    void calcVoronoi();\n    void clearVoronoi();\n    void checkSubdiv() const;\n\n    struct CV_EXPORTS Vertex\n    {\n        Vertex();\n        Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0);\n        bool isvirtual() const;\n        bool isfree() const;\n\n        int firstEdge;\n        int type;\n        Point2f pt;\n    };\n\n    struct CV_EXPORTS QuadEdge\n    {\n        QuadEdge();\n        QuadEdge(int edgeidx);\n        bool isfree() const;\n\n        int next[4];\n        int pt[4];\n    };\n\n    std::vector<Vertex> vtx;\n    std::vector<QuadEdge> qedges;\n    int freeQEdge;\n    int freePoint;\n    bool validGeometry;\n\n    int recentEdge;\n    Point2f topLeft;\n    Point2f bottomRight;\n};\n\n//! @addtogroup imgproc_feature\n//! @{\n\n/** @example lsd_lines.cpp\nAn example using the LineSegmentDetector\n*/\n\n/** @brief Line segment detector class\n\nfollowing the algorithm described at @cite Rafael12 .\n*/\nclass CV_EXPORTS_W LineSegmentDetector : public Algorithm\n{\npublic:\n\n    /** @brief Finds lines in the input image.\n\n    This is the output of the default parameters of the algorithm on the image shown above.\n\n    ![image](pics/building_lsd.png)\n\n    @param _image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use:\n    `lsd_ptr-\\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);`\n    @param _lines A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line, where\n    Vec4i/Vec4f is (x1, y1, x2, y2); point 1 is the start and point 2 the end. Returned lines are strictly\n    oriented depending on the gradient.\n    @param width Vector of widths of the regions where the lines are found, e.g. the width of a line.\n    @param prec Vector of precisions with which the lines are found.\n    @param nfa Vector containing number of false alarms in the line region, with precision of 10%. The\n    bigger the value, the logarithmically better the detection.\n    - -1 corresponds to 10 mean false alarms\n    - 0 corresponds to 1 mean false alarm\n    - 1 corresponds to 0.1 mean false alarms\n    This vector will be calculated only when the object's type is LSD_REFINE_ADV.\n    */\n    CV_WRAP virtual void detect(InputArray _image, OutputArray _lines,\n                        OutputArray width = noArray(), OutputArray prec = noArray(),\n                        OutputArray nfa = noArray()) = 0;\n\n    /** @brief Draws the line segments on a given image.\n    @param _image The image, where the lines will be drawn. Should be bigger or equal to the image,\n    where the lines were found.\n    @param lines A vector of the lines that need to be drawn.\n     */\n    CV_WRAP virtual void drawSegments(InputOutputArray _image, InputArray lines) = 0;\n\n    /** @brief Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels.\n\n    @param size The size of the image, where lines1 and lines2 were found.\n    @param lines1 The first group of lines that needs to be drawn. It is visualized in blue color.\n    @param lines2 The second group of lines. They are visualized in red color.\n    @param _image Optional image, where the lines will be drawn. 
The image should be color (3-channel)\n    in order for lines1 and lines2 to be drawn in the above-mentioned colors.\n     */\n    CV_WRAP virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()) = 0;\n\n    virtual ~LineSegmentDetector() { }\n};\n\n/** @brief Creates a smart pointer to a LineSegmentDetector object and initializes it.\n\nThe LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want\nto edit those, so as to tailor the algorithm to their own application.\n\n@param _refine The way found lines will be refined, see cv::LineSegmentDetectorModes\n@param _scale The scale of the image that will be used to find the lines. Range (0..1].\n@param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.\n@param _quant Bound to the quantization error on the gradient norm.\n@param _ang_th Gradient angle tolerance in degrees.\n@param _log_eps Detection threshold: -log10(NFA) \\> log_eps. Used only when advanced refinement\nis chosen.\n@param _density_th Minimal density of aligned region points in the enclosing rectangle.\n@param _n_bins Number of bins in pseudo-ordering of gradient modulus.\n */\nCV_EXPORTS_W Ptr<LineSegmentDetector> createLineSegmentDetector(\n    int _refine = LSD_REFINE_STD, double _scale = 0.8,\n    double _sigma_scale = 0.6, double _quant = 2.0, double _ang_th = 22.5,\n    double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024);\n\n//! @} imgproc_feature\n\n//! @addtogroup imgproc_filter\n//! @{\n\n/** @brief Returns Gaussian filter coefficients.\n\nThe function computes and returns the \\f$\\texttt{ksize} \\times 1\\f$ matrix of Gaussian filter\ncoefficients:\n\n\\f[G_i= \\alpha *e^{-(i-( \\texttt{ksize} -1)/2)^2/(2* \\texttt{sigma}^2)},\\f]\n\nwhere \\f$i=0..\\texttt{ksize}-1\\f$ and \\f$\\alpha\\f$ is the scale factor chosen so that \\f$\\sum_i G_i=1\\f$.\n\nTwo such generated kernels can be passed to sepFilter2D. Those functions automatically recognize\nsmoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly.\nYou may also use the higher-level GaussianBlur.\n@param ksize Aperture size. It should be odd ( \\f$\\texttt{ksize} \\mod 2 = 1\\f$ ) and positive.\n@param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as\n`sigma = 0.3\\*((ksize-1)\\*0.5 - 1) + 0.8`.\n@param ktype Type of filter coefficients. It can be CV_32F or CV_64F .\n@sa  sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur\n */\nCV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype = CV_64F );\n\n/** @brief Returns filter coefficients for computing spatial image derivatives.\n\nThe function computes and returns the filter coefficients for spatial image derivatives. When\n`ksize=CV_SCHARR`, the Scharr \\f$3 \\times 3\\f$ kernels are generated (see cv::Scharr). Otherwise, Sobel\nkernels are generated (see cv::Sobel). The filters are normally passed to sepFilter2D.\n\n@param kx Output matrix of row filter coefficients. It has the type ktype .\n@param ky Output matrix of column filter coefficients. It has the type ktype .\n@param dx Derivative order with respect to x.\n@param dy Derivative order with respect to y.\n@param ksize Aperture size. 
It can be CV_SCHARR, 1, 3, 5, or 7.\n@param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.\nTheoretically, the coefficients should have the denominator \\f$=2^{ksize*2-dx-dy-2}\\f$. If you are\ngoing to filter floating-point images, you are likely to use the normalized kernels. But if you\ncompute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve\nall the fractional bits, you may want to set normalize=false .\n@param ktype Type of filter coefficients. It can be CV_32F or CV_64F .\n */\nCV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky,\n                                   int dx, int dy, int ksize,\n                                   bool normalize = false, int ktype = CV_32F );\n\n/** @brief Returns Gabor filter coefficients.\n\nFor more details about Gabor filter equations and parameters, see: [Gabor\nFilter](http://en.wikipedia.org/wiki/Gabor_filter).\n\n@param ksize Size of the filter returned.\n@param sigma Standard deviation of the Gaussian envelope.\n@param theta Orientation of the normal to the parallel stripes of a Gabor function.\n@param lambd Wavelength of the sinusoidal factor.\n@param gamma Spatial aspect ratio.\n@param psi Phase offset.\n@param ktype Type of filter coefficients. It can be CV_32F or CV_64F .\n */\nCV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd,\n                                 double gamma, double psi = CV_PI*0.5, int ktype = CV_64F );\n\n//! returns \"magic\" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation.\nstatic inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }\n\n/** @brief Returns a structuring element of the specified size and shape for morphological operations.\n\nThe function constructs and returns the structuring element that can be further passed to cv::erode,\ncv::dilate or cv::morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as\nthe structuring element.\n\n@param shape Element shape that could be one of cv::MorphShapes\n@param ksize Size of the structuring element.\n@param anchor Anchor position within the element. The default value \\f$(-1, -1)\\f$ means that the\nanchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor\nposition. In other cases the anchor just regulates how much the result of the morphological\noperation is shifted.\n */\nCV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1));\n\n/** @brief Blurs an image using the median filter.\n\nThe function smoothes an image using the median filter with the \\f$\\texttt{ksize} \\times\n\\texttt{ksize}\\f$ aperture. Each channel of a multi-channel image is processed independently.\nIn-place operation is supported.\n\n@param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be\nCV_8U, CV_16U, or CV_32F; for larger aperture sizes, it can only be CV_8U.\n@param dst destination array of the same size and type as src.\n@param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...\n@sa  bilateralFilter, blur, boxFilter, GaussianBlur\n */\nCV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );\n\n/** @brief Blurs an image using a Gaussian filter.\n\nThe function convolves the source image with the specified Gaussian kernel. 
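For instance, a mild smoothing pass might look like this (a minimal sketch; the kernel size and\nsigma are illustrative):\n@code\nGaussianBlur(src, dst, Size(5, 5), 1.5); // 5x5 kernel; sigmaY = 0 means it is set equal to sigmaX\n@endcode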
In-place filtering is\nsupported.\n\n@param src input image; the image can have any number of channels, which are processed\nindependently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n@param dst output image of the same size and type as src.\n@param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be\npositive and odd. Or, they can be zeros and then they are computed from sigma.\n@param sigmaX Gaussian kernel standard deviation in X direction.\n@param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be\nequal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,\nrespectively (see cv::getGaussianKernel for details); to fully control the result regardless of\npossible future modifications of all these semantics, it is recommended to specify all of ksize,\nsigmaX, and sigmaY.\n@param borderType pixel extrapolation method, see cv::BorderTypes\n\n@sa  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur\n */\nCV_EXPORTS_W void GaussianBlur( InputArray src, OutputArray dst, Size ksize,\n                                double sigmaX, double sigmaY = 0,\n                                int borderType = BORDER_DEFAULT );\n\n/** @brief Applies the bilateral filter to an image.\n\nThe function applies bilateral filtering to the input image, as described in\nhttp://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html\nbilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is\nvery slow compared to most filters.\n\n_Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\\<\n10), the filter will not have much effect, whereas if they are large (\\> 150), they will have a very\nstrong effect, making the image look \"cartoonish\".\n\n_Filter size_: Large filters (d \\> 5) are very slow, so it is recommended to use d=5 for real-time\napplications, and perhaps d=9 for offline applications that need heavy noise filtering.\n\nThis filter does not work in place.\n@param src Source 8-bit or floating-point, 1-channel or 3-channel image.\n@param dst Destination image of the same size and type as src .\n@param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,\nit is computed from sigmaSpace.\n@param sigmaColor Filter sigma in the color space. A larger value of the parameter means that\nfarther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting\nin larger areas of semi-equal color.\n@param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that\nfarther pixels will influence each other as long as their colors are close enough (see sigmaColor\n). When d\\>0, it specifies the neighborhood size regardless of sigmaSpace. 
Otherwise, d is\nproportional to sigmaSpace.\n@param borderType border mode used to extrapolate pixels outside of the image, see cv::BorderTypes\n */\nCV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,\n                                   double sigmaColor, double sigmaSpace,\n                                   int borderType = BORDER_DEFAULT );\n\n/** @brief Blurs an image using the box filter.\n\nThe function smoothes an image using the kernel:\n\n\\f[\\texttt{K} =  \\alpha \\begin{bmatrix} 1 & 1 & 1 &  \\cdots & 1 & 1  \\\\ 1 & 1 & 1 &  \\cdots & 1 & 1  \\\\ \\hdotsfor{6} \\\\ 1 & 1 & 1 &  \\cdots & 1 & 1 \\end{bmatrix}\\f]\n\nwhere\n\n\\f[\\alpha = \\fork{\\frac{1}{\\texttt{ksize.width*ksize.height}}}{when \\texttt{normalize=true}}{1}{otherwise}\\f]\n\nAn unnormalized box filter is useful for computing various integral characteristics over each pixel\nneighborhood, such as covariance matrices of image derivatives (used in dense optical flow\nalgorithms, and so on). If you need to compute pixel sums over variable-size windows, use cv::integral.\n\n@param src input image.\n@param dst output image of the same size and type as src.\n@param ddepth the output image depth (-1 to use src.depth()).\n@param ksize blurring kernel size.\n@param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel\ncenter.\n@param normalize flag, specifying whether the kernel is normalized by its area or not.\n@param borderType border mode used to extrapolate pixels outside of the image, see cv::BorderTypes\n@sa  blur, bilateralFilter, GaussianBlur, medianBlur, integral\n */\nCV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,\n                             Size ksize, Point anchor = Point(-1,-1),\n                             bool normalize = true,\n                             int borderType = BORDER_DEFAULT );\n\n/** @brief Calculates the normalized sum of squares of the pixel values overlapping the filter.\n\nFor every pixel \\f$ (x, y) \\f$ in the source image, the function calculates the sum of squares of those neighboring\npixel values which overlap the filter placed over the pixel \\f$ (x, y) \\f$.\n\nThe unnormalized square box filter can be useful in computing local image statistics such as the local\nvariance and standard deviation around the neighborhood of a pixel.\n\n@param _src input image\n@param _dst output image of the same size and type as _src\n@param ddepth the output image depth (-1 to use src.depth())\n@param ksize kernel size\n@param anchor kernel anchor point. 
The default value of Point(-1, -1) denotes that the anchor is at the kernel\ncenter.\n@param normalize flag, specifying whether the kernel is to be normalized by its area or not.\n@param borderType border mode used to extrapolate pixels outside of the image, see cv::BorderTypes\n@sa boxFilter\n*/\nCV_EXPORTS_W void sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,\n                                Size ksize, Point anchor = Point(-1, -1),\n                                bool normalize = true,\n                                int borderType = BORDER_DEFAULT );\n\n/** @brief Blurs an image using the normalized box filter.\n\nThe function smoothes an image using the kernel:\n\n\\f[\\texttt{K} =  \\frac{1}{\\texttt{ksize.width*ksize.height}} \\begin{bmatrix} 1 & 1 & 1 &  \\cdots & 1 & 1  \\\\ 1 & 1 & 1 &  \\cdots & 1 & 1  \\\\ \\hdotsfor{6} \\\\ 1 & 1 & 1 &  \\cdots & 1 & 1  \\\\ \\end{bmatrix}\\f]\n\nThe call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(), ksize,\nanchor, true, borderType)`.\n\n@param src input image; it can have any number of channels, which are processed independently, but\nthe depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n@param dst output image of the same size and type as src.\n@param ksize blurring kernel size.\n@param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel\ncenter.\n@param borderType border mode used to extrapolate pixels outside of the image, see cv::BorderTypes\n@sa  boxFilter, bilateralFilter, GaussianBlur, medianBlur\n */\nCV_EXPORTS_W void blur( InputArray src, OutputArray dst,\n                        Size ksize, Point anchor = Point(-1,-1),\n                        int borderType = BORDER_DEFAULT );\n\n/** @brief Convolves an image with the kernel.\n\nThe function applies an arbitrary linear filter to an image. In-place operation is supported. When\nthe aperture is partially outside the image, the function interpolates outlier pixel values\naccording to the specified border mode.\n\nThe function actually computes correlation, not convolution:\n\n\\f[\\texttt{dst} (x,y) =  \\sum _{ \\stackrel{0\\leq x' < \\texttt{kernel.cols},}{0\\leq y' < \\texttt{kernel.rows}} }  \\texttt{kernel} (x',y')* \\texttt{src} (x+x'- \\texttt{anchor.x} ,y+y'- \\texttt{anchor.y} )\\f]\n\nThat is, the kernel is not mirrored around the anchor point. 
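For example, a simple 3x3 sharpening kernel could be applied like this (a minimal sketch; the\nkernel values are illustrative, and this kernel is symmetric, so correlation and convolution\ncoincide):\n@code\nMat kernel = (Mat_<float>(3, 3) <<  0, -1,  0,\n                                   -1,  5, -1,\n                                    0, -1,  0);\nfilter2D(src, dst, -1, kernel); // ddepth = -1 keeps the source depth\n@endcode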
If you need a real convolution, flip\nthe kernel using cv::flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -\nanchor.y - 1)`.\n\nThe function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or\nlarger) and the direct algorithm for small kernels.\n\n@param src input image.\n@param dst output image of the same size and the same number of channels as src.\n@param ddepth desired depth of the destination image, see @ref filter_depths \"combinations\"\n@param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point\nmatrix; if you want to apply different kernels to different channels, split the image into\nseparate color planes using split and process them individually.\n@param anchor anchor of the kernel that indicates the relative position of a filtered point within\nthe kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor\nis at the kernel center.\n@param delta optional value added to the filtered pixels before storing them in dst.\n@param borderType pixel extrapolation method, see cv::BorderTypes\n@sa  sepFilter2D, dft, matchTemplate\n */\nCV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth,\n                            InputArray kernel, Point anchor = Point(-1,-1),\n                            double delta = 0, int borderType = BORDER_DEFAULT );\n\n/** @brief Applies a separable linear filter to an image.\n\nThe function applies a separable linear filter to the image. That is, first, every row of src is\nfiltered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D\nkernel kernelY. The final result shifted by delta is stored in dst .\n\n@param src Source image.\n@param dst Destination image of the same size and the same number of channels as src .\n@param ddepth Destination image depth, see @ref filter_depths \"combinations\"\n@param kernelX Coefficients for filtering each row.\n@param kernelY Coefficients for filtering each column.\n@param anchor Anchor position within the kernel. The default value \\f$(-1,-1)\\f$ means that the anchor\nis at the kernel center.\n@param delta Value added to the filtered results before storing them.\n@param borderType Pixel extrapolation method, see cv::BorderTypes\n@sa  filter2D, Sobel, GaussianBlur, boxFilter, blur\n */\nCV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth,\n                               InputArray kernelX, InputArray kernelY,\n                               Point anchor = Point(-1,-1),\n                               double delta = 0, int borderType = BORDER_DEFAULT );\n\n/** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.\n\nIn all cases except one, the \\f$\\texttt{ksize} \\times \\texttt{ksize}\\f$ separable kernel is used to\ncalculate the derivative. When \\f$\\texttt{ksize = 1}\\f$, the \\f$3 \\times 1\\f$ or \\f$1 \\times 3\\f$\nkernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first\nor the second x- or y- derivatives.\n\nThere is also the special value `ksize = CV_SCHARR (-1)` that corresponds to the \\f$3\\times3\\f$ Scharr\nfilter that may give more accurate results than the \\f$3\\times3\\f$ Sobel. 
The Scharr aperture is\n\n\\f[\\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\\f]\n\nfor the x-derivative, or transposed for the y-derivative.\n\nThe function calculates an image derivative by convolving the image with the appropriate kernel:\n\n\\f[\\texttt{dst} =  \\frac{\\partial^{xorder+yorder} \\texttt{src}}{\\partial x^{xorder} \\partial y^{yorder}}\\f]\n\nThe Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less\nresistant to noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)\nor ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first\ncase corresponds to a kernel of:\n\n\\f[\\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\\f]\n\nThe second case corresponds to a kernel of:\n\n\\f[\\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\\f]\n\n@param src input image.\n@param dst output image of the same size and the same number of channels as src .\n@param ddepth output image depth, see @ref filter_depths \"combinations\"; in the case of\n    8-bit input images it will result in truncated derivatives.\n@param dx order of the derivative x.\n@param dy order of the derivative y.\n@param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.\n@param scale optional scale factor for the computed derivative values; by default, no scaling is\napplied (see cv::getDerivKernels for details).\n@param delta optional delta value that is added to the results prior to storing them in dst.\n@param borderType pixel extrapolation method, see cv::BorderTypes\n@sa  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar\n */\nCV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth,\n                         int dx, int dy, int ksize = 3,\n                         double scale = 1, double delta = 0,\n                         int borderType = BORDER_DEFAULT );\n\n/** @brief Calculates the first-order image derivative in both x and y using a Sobel operator\n\nEquivalent to calling:\n\n@code\nSobel( src, dx, CV_16SC1, 1, 0, 3 );\nSobel( src, dy, CV_16SC1, 0, 1, 3 );\n@endcode\n\n@param src input image.\n@param dx output image with first-order derivative in x.\n@param dy output image with first-order derivative in y.\n@param ksize size of Sobel kernel. It must be 3.\n@param borderType pixel extrapolation method, see cv::BorderTypes\n\n@sa Sobel\n */\n\nCV_EXPORTS_W void spatialGradient( InputArray src, OutputArray dx,\n                                   OutputArray dy, int ksize = 3,\n                                   int borderType = BORDER_DEFAULT );\n\n/** @brief Calculates the first x- or y- image derivative using Scharr operator.\n\nThe function computes the first x- or y- spatial image derivative using the Scharr operator. 
The\ncall\n\n\\f[\\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\\f]\n\nis equivalent to\n\n\\f[\\texttt{Sobel(src, dst, ddepth, dx, dy, CV\\_SCHARR, scale, delta, borderType)} .\\f]\n\n@param src input image.\n@param dst output image of the same size and the same number of channels as src.\n@param ddepth output image depth, see @ref filter_depths \"combinations\"\n@param dx order of the derivative x.\n@param dy order of the derivative y.\n@param scale optional scale factor for the computed derivative values; by default, no scaling is\napplied (see getDerivKernels for details).\n@param delta optional delta value that is added to the results prior to storing them in dst.\n@param borderType pixel extrapolation method, see cv::BorderTypes\n@sa  cartToPolar\n */\nCV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth,\n                          int dx, int dy, double scale = 1, double delta = 0,\n                          int borderType = BORDER_DEFAULT );\n\n/** @example laplace.cpp\n  An example using Laplace transformations for edge detection\n*/\n\n/** @brief Calculates the Laplacian of an image.\n\nThe function calculates the Laplacian of the source image by adding up the second x and y\nderivatives calculated using the Sobel operator:\n\n\\f[\\texttt{dst} =  \\Delta \\texttt{src} =  \\frac{\\partial^2 \\texttt{src}}{\\partial x^2} +  \\frac{\\partial^2 \\texttt{src}}{\\partial y^2}\\f]\n\nThis is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image\nwith the following \\f$3 \\times 3\\f$ aperture:\n\n\\f[\\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\\f]\n\n@param src Source image.\n@param dst Destination image of the same size and the same number of channels as src .\n@param ddepth Desired depth of the destination image.\n@param ksize Aperture size used to compute the second-derivative filters. See getDerivKernels for\ndetails. The size must be positive and odd.\n@param scale Optional scale factor for the computed Laplacian values. By default, no scaling is\napplied. See getDerivKernels for details.\n@param delta Optional delta value that is added to the results prior to storing them in dst .\n@param borderType Pixel extrapolation method, see cv::BorderTypes\n@sa  Sobel, Scharr\n */\nCV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth,\n                             int ksize = 1, double scale = 1, double delta = 0,\n                             int borderType = BORDER_DEFAULT );\n\n//! @} imgproc_filter\n\n//! @addtogroup imgproc_feature\n//! @{\n\n/** @example edge.cpp\n  An example on using the canny edge detector\n*/\n\n/** @brief Finds edges in an image using the Canny algorithm @cite Canny86 .\n\nThe function finds edges in the input image image and marks them in the output map edges using the\nCanny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The\nlargest value is used to find initial segments of strong edges. 
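A typical call with a 1:3 low-to-high threshold ratio might look like this (a minimal sketch; the\nthreshold values are illustrative):\n@code\nMat edges;\nCanny(gray, edges, 50, 150); // gray is an 8-bit single-channel image\n@endcode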
See\n<http://en.wikipedia.org/wiki/Canny_edge_detector>\n\n@param image 8-bit input image.\n@param edges output edge map; single-channel 8-bit image, which has the same size as image .\n@param threshold1 first threshold for the hysteresis procedure.\n@param threshold2 second threshold for the hysteresis procedure.\n@param apertureSize aperture size for the Sobel operator.\n@param L2gradient a flag, indicating whether a more accurate \\f$L_2\\f$ norm\n\\f$=\\sqrt{(dI/dx)^2 + (dI/dy)^2}\\f$ should be used to calculate the image gradient magnitude (\nL2gradient=true ), or whether the default \\f$L_1\\f$ norm \\f$=|dI/dx|+|dI/dy|\\f$ is enough (\nL2gradient=false ).\n */\nCV_EXPORTS_W void Canny( InputArray image, OutputArray edges,\n                         double threshold1, double threshold2,\n                         int apertureSize = 3, bool L2gradient = false );\n\n/** @brief Calculates the minimal eigenvalue of gradient matrices for corner detection.\n\nThe function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal\neigenvalue of the covariance matrix of derivatives, that is, \\f$\\min(\\lambda_1, \\lambda_2)\\f$ in terms\nof the formulae in the cornerEigenValsAndVecs description.\n\n@param src Input single-channel 8-bit or floating-point image.\n@param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as\nsrc .\n@param blockSize Neighborhood size (see the details on cornerEigenValsAndVecs ).\n@param ksize Aperture parameter for the Sobel operator.\n@param borderType Pixel extrapolation method. See cv::BorderTypes.\n */\nCV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst,\n                                     int blockSize, int ksize = 3,\n                                     int borderType = BORDER_DEFAULT );\n\n/** @brief Harris corner detector.\n\nThe function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and\ncornerEigenValsAndVecs , for each pixel \\f$(x, y)\\f$ it calculates a \\f$2\\times2\\f$ gradient covariance\nmatrix \\f$M^{(x,y)}\\f$ over a \\f$\\texttt{blockSize} \\times \\texttt{blockSize}\\f$ neighborhood. Then, it\ncomputes the following characteristic:\n\n\\f[\\texttt{dst} (x,y) =  \\mathrm{det} M^{(x,y)} - k  \\cdot \\left ( \\mathrm{tr} M^{(x,y)} \\right )^2\\f]\n\nCorners in the image can be found as the local maxima of this response map.\n\n@param src Input single-channel 8-bit or floating-point image.\n@param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same\nsize as src .\n@param blockSize Neighborhood size (see the details on cornerEigenValsAndVecs ).\n@param ksize Aperture parameter for the Sobel operator.\n@param k Harris detector free parameter. See the formula above.\n@param borderType Pixel extrapolation method. See cv::BorderTypes.\n */\nCV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,\n                                int ksize, double k,\n                                int borderType = BORDER_DEFAULT );\n\n/** @brief Calculates eigenvalues and eigenvectors of image blocks for corner detection.\n\nFor every pixel \\f$p\\f$ , the function cornerEigenValsAndVecs considers a blockSize \\f$\\times\\f$ blockSize\nneighborhood \\f$S(p)\\f$ . 
It calculates the covariance matrix of derivatives over the neighborhood as:\n\n\\f[M =  \\begin{bmatrix} \\sum _{S(p)}(dI/dx)^2 &  \\sum _{S(p)}dI/dx dI/dy  \\\\ \\sum _{S(p)}dI/dx dI/dy &  \\sum _{S(p)}(dI/dy)^2 \\end{bmatrix}\\f]\n\nwhere the derivatives are computed using the Sobel operator.\n\nAfter that, it finds eigenvectors and eigenvalues of \\f$M\\f$ and stores them in the destination image as\n\\f$(\\lambda_1, \\lambda_2, x_1, y_1, x_2, y_2)\\f$ where\n\n-   \\f$\\lambda_1, \\lambda_2\\f$ are the non-sorted eigenvalues of \\f$M\\f$\n-   \\f$x_1, y_1\\f$ are the eigenvectors corresponding to \\f$\\lambda_1\\f$\n-   \\f$x_2, y_2\\f$ are the eigenvectors corresponding to \\f$\\lambda_2\\f$\n\nThe output of the function can be used for robust edge or corner detection.\n\n@param src Input single-channel 8-bit or floating-point image.\n@param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .\n@param blockSize Neighborhood size (see details below).\n@param ksize Aperture parameter for the Sobel operator.\n@param borderType Pixel extrapolation method. See cv::BorderTypes.\n\n@sa  cornerMinEigenVal, cornerHarris, preCornerDetect\n */\nCV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst,\n                                          int blockSize, int ksize,\n                                          int borderType = BORDER_DEFAULT );\n\n/** @brief Calculates a feature map for corner detection.\n\nThe function calculates the complex spatial derivative-based function of the source image\n\n\\f[\\texttt{dst} = (D_x  \\texttt{src} )^2  \\cdot D_{yy}  \\texttt{src} + (D_y  \\texttt{src} )^2  \\cdot D_{xx}  \\texttt{src} - 2 D_x  \\texttt{src} \\cdot D_y  \\texttt{src} \\cdot D_{xy}  \\texttt{src}\\f]\n\nwhere \\f$D_x\\f$,\\f$D_y\\f$ are the first image derivatives, \\f$D_{xx}\\f$,\\f$D_{yy}\\f$ are the second image\nderivatives, and \\f$D_{xy}\\f$ is the mixed derivative.\n\nThe corners can be found as local maxima of this function, as shown below:\n@code\n    Mat corners, dilated_corners;\n    preCornerDetect(image, corners, 3);\n    // dilation with 3x3 rectangular structuring element\n    dilate(corners, dilated_corners, Mat(), Point(-1,-1), 1);\n    Mat corner_mask = corners == dilated_corners;\n@endcode\n\n@param src Source single-channel 8-bit or floating-point image.\n@param dst Output image that has the type CV_32F and the same size as src .\n@param ksize %Aperture size of the Sobel operator.\n@param borderType Pixel extrapolation method. See cv::BorderTypes.\n */\nCV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize,\n                                   int borderType = BORDER_DEFAULT );\n\n/** @brief Refines the corner locations.\n\nThe function iterates to find the sub-pixel accurate location of corners or radial saddle points, as\nshown on the figure below.\n\n![image](pics/cornersubpix.png)\n\nThe sub-pixel accurate corner locator is based on the observation that every vector from the center \\f$q\\f$\nto a point \\f$p\\f$ located within a neighborhood of \\f$q\\f$ is orthogonal to the image gradient at \\f$p\\f$\nsubject to image and measurement noise. Consider the expression:\n\n\\f[\\epsilon _i = {DI_{p_i}}^T  \\cdot (q - p_i)\\f]\n\nwhere \\f${DI_{p_i}}\\f$ is an image gradient at one of the points \\f$p_i\\f$ in a neighborhood of \\f$q\\f$ . The\nvalue of \\f$q\\f$ is to be found so that \\f$\\epsilon_i\\f$ is minimized. 
A system of equations may be set up\nwith \\f$\\epsilon_i\\f$ set to zero:\n\n\\f[\\sum _i(DI_{p_i}  \\cdot {DI_{p_i}}^T)  \\cdot q -  \\sum _i(DI_{p_i}  \\cdot {DI_{p_i}}^T  \\cdot p_i) = 0\\f]\n\nwhere the gradients are summed within a neighborhood (\"search window\") of \\f$q\\f$ . Calling the first\ngradient term \\f$G\\f$ and the second gradient term \\f$b\\f$ gives:\n\n\\f[q = G^{-1}  \\cdot b\\f]\n\nThe algorithm sets the center of the neighborhood window at this new center \\f$q\\f$ and then iterates\nuntil the center stays within a set threshold.\n\n@param image Input image.\n@param corners Initial coordinates of the input corners and refined coordinates provided for\noutput.\n@param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) ,\nthen a \\f$5*2+1 \\times 5*2+1 = 11 \\times 11\\f$ search window is used.\n@param zeroZone Half of the size of the dead region in the middle of the search zone over which\nthe summation in the formula above is not done. It is used sometimes to avoid possible\nsingularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such\nsize.\n@param criteria Criteria for termination of the iterative process of corner refinement. That is,\nthe process of corner position refinement stops either after criteria.maxCount iterations or when\nthe corner position moves by less than criteria.epsilon on some iteration.\n */\nCV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners,\n                                Size winSize, Size zeroZone,\n                                TermCriteria criteria );\n\n/** @brief Determines strong corners on an image.\n\nThe function finds the most prominent corners in the image or in the specified image region, as\ndescribed in @cite Shi94\n\n-   The function calculates the corner quality measure at every source image pixel using the\n    cornerMinEigenVal or cornerHarris .\n-   The function performs a non-maximum suppression (the local maxima in a *3 x 3* neighborhood are\n    retained).\n-   The corners with the minimal eigenvalue less than\n    \\f$\\texttt{qualityLevel} \\cdot \\max_{x,y} qualityMeasureMap(x,y)\\f$ are rejected.\n-   The remaining corners are sorted by the quality measure in the descending order.\n-   The function throws away each corner for which there is a stronger corner at a distance less than\n    minDistance.\n\nThe function can be used to initialize a point-based tracker of an object.\n\n@note If the function is called with different values A and B of the parameter qualityLevel , and\nA \\> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector\nwith qualityLevel=B .\n\n@param image Input 8-bit or floating-point 32-bit, single-channel image.\n@param corners Output vector of detected corners.\n@param maxCorners Maximum number of corners to return. If there are more corners than maxCorners,\nthe strongest maxCorners of them are returned.\n@param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The\nparameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue\n(see cornerMinEigenVal ) or the Harris function response (see cornerHarris ). The corners with the\nquality measure less than the product are rejected. 
For example, if the best corner has the\nquality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure\nless than 15 are rejected.\n@param minDistance Minimum possible Euclidean distance between the returned corners.\n@param mask Optional region of interest. If the mask is not empty (it needs to have the type\nCV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.\n@param blockSize Size of an average block for computing a derivative covariance matrix over each\npixel neighborhood. See cornerEigenValsAndVecs .\n@param useHarrisDetector Parameter indicating whether to use a Harris detector (see cornerHarris)\nor cornerMinEigenVal.\n@param k Free parameter of the Harris detector.\n\n@sa  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform\n */\nCV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners,\n                                     int maxCorners, double qualityLevel, double minDistance,\n                                     InputArray mask = noArray(), int blockSize = 3,\n                                     bool useHarrisDetector = false, double k = 0.04 );\n\n/** @example houghlines.cpp\nAn example using the Hough line detector\n*/\n\n/** @brief Finds lines in a binary image using the standard Hough transform.\n\nThe function implements the standard or standard multi-scale Hough transform algorithm for line\ndetection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of the Hough\ntransform.\n\n@param image 8-bit, single-channel binary source image. The image may be modified by the function.\n@param lines Output vector of lines. Each line is represented by a two-element vector\n\\f$(\\rho, \\theta)\\f$ . \\f$\\rho\\f$ is the distance from the coordinate origin \\f$(0,0)\\f$ (top-left corner of\nthe image). \\f$\\theta\\f$ is the line rotation angle in radians (\n\\f$0 \\sim \\textrm{vertical line}, \\pi/2 \\sim \\textrm{horizontal line}\\f$ ).\n@param rho Distance resolution of the accumulator in pixels.\n@param theta Angle resolution of the accumulator in radians.\n@param threshold Accumulator threshold parameter. Only those lines are returned that get enough\nvotes ( \\f$>\\texttt{threshold}\\f$ ).\n@param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .\nThe coarse accumulator distance resolution is rho and the accurate accumulator resolution is\nrho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. 
Otherwise, both these\nparameters should be positive.\n@param stn For the multi-scale Hough transform, it is a divisor for the angle resolution theta.\n@param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.\nMust fall between 0 and max_theta.\n@param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines.\nMust fall between min_theta and CV_PI.\n */\nCV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines,\n                              double rho, double theta, int threshold,\n                              double srn = 0, double stn = 0,\n                              double min_theta = 0, double max_theta = CV_PI );\n\n/** @brief Finds line segments in a binary image using the probabilistic Hough transform.\n\nThe function implements the probabilistic Hough transform algorithm for line detection, described\nin @cite Matas00\n\nSee the line detection example below:\n\n@code\n    #include <opencv2/imgproc.hpp>\n    #include <opencv2/highgui.hpp>\n\n    using namespace cv;\n    using namespace std;\n\n    int main(int argc, char** argv)\n    {\n        Mat src, dst, color_dst;\n        if( argc != 2 || !(src=imread(argv[1], 0)).data)\n            return -1;\n\n        Canny( src, dst, 50, 200, 3 );\n        cvtColor( dst, color_dst, COLOR_GRAY2BGR );\n\n    #if 0\n        vector<Vec2f> lines;\n        HoughLines( dst, lines, 1, CV_PI/180, 100 );\n\n        for( size_t i = 0; i < lines.size(); i++ )\n        {\n            float rho = lines[i][0];\n            float theta = lines[i][1];\n            double a = cos(theta), b = sin(theta);\n            double x0 = a*rho, y0 = b*rho;\n            Point pt1(cvRound(x0 + 1000*(-b)),\n                      cvRound(y0 + 1000*(a)));\n            Point pt2(cvRound(x0 - 1000*(-b)),\n                      cvRound(y0 - 1000*(a)));\n            line( color_dst, pt1, pt2, Scalar(0,0,255), 3, 8 );\n        }\n    #else\n        vector<Vec4i> lines;\n        HoughLinesP( dst, lines, 1, CV_PI/180, 80, 30, 10 );\n        for( size_t i = 0; i < lines.size(); i++ )\n        {\n            line( color_dst, Point(lines[i][0], lines[i][1]),\n                Point(lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8 );\n        }\n    #endif\n        namedWindow( \"Source\", 1 );\n        imshow( \"Source\", src );\n\n        namedWindow( \"Detected Lines\", 1 );\n        imshow( \"Detected Lines\", color_dst );\n\n        waitKey(0);\n        return 0;\n    }\n@endcode\nThis is a sample picture the function parameters have been tuned for:\n\n![image](pics/building.jpg)\n\nAnd this is the output of the above program in case of the probabilistic Hough transform:\n\n![image](pics/houghp.png)\n\n@param image 8-bit, single-channel binary source image. The image may be modified by the function.\n@param lines Output vector of lines. Each line is represented by a 4-element vector\n\\f$(x_1, y_1, x_2, y_2)\\f$ , where \\f$(x_1,y_1)\\f$ and \\f$(x_2, y_2)\\f$ are the ending points of each detected\nline segment.\n@param rho Distance resolution of the accumulator in pixels.\n@param theta Angle resolution of the accumulator in radians.\n@param threshold Accumulator threshold parameter. Only those lines are returned that get enough\nvotes ( \\f$>\\texttt{threshold}\\f$ ).\n@param minLineLength Minimum line length. 
Line segments shorter than that are rejected.\n@param maxLineGap Maximum allowed gap between points on the same line to link them.\n\n@sa LineSegmentDetector\n */\nCV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines,\n                               double rho, double theta, int threshold,\n                               double minLineLength = 0, double maxLineGap = 0 );\n\n/** @example houghcircles.cpp\nAn example using the Hough circle detector\n*/\n\n/** @brief Finds circles in a grayscale image using the Hough transform.\n\nThe function finds circles in a grayscale image using a modification of the Hough transform.\n\nExample:\n@code\n    #include <opencv2/imgproc.hpp>\n    #include <opencv2/highgui.hpp>\n    #include <math.h>\n\n    using namespace cv;\n    using namespace std;\n\n    int main(int argc, char** argv)\n    {\n        Mat img, gray;\n        if( argc != 2 || !(img=imread(argv[1], 1)).data)\n            return -1;\n        cvtColor(img, gray, COLOR_BGR2GRAY);\n        // smooth it, otherwise a lot of false circles may be detected\n        GaussianBlur( gray, gray, Size(9, 9), 2, 2 );\n        vector<Vec3f> circles;\n        HoughCircles(gray, circles, HOUGH_GRADIENT,\n                     2, gray.rows/4, 200, 100 );\n        for( size_t i = 0; i < circles.size(); i++ )\n        {\n             Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));\n             int radius = cvRound(circles[i][2]);\n             // draw the circle center\n             circle( img, center, 3, Scalar(0,255,0), -1, 8, 0 );\n             // draw the circle outline\n             circle( img, center, radius, Scalar(0,0,255), 3, 8, 0 );\n        }\n        namedWindow( \"circles\", 1 );\n        imshow( \"circles\", img );\n\n        waitKey(0);\n        return 0;\n    }\n@endcode\n\n@note Usually the function detects the centers of circles well. However, it may fail to find correct\nradii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if\nyou know it. Or, you may ignore the returned radius, use only the center, and find the correct\nradius using an additional procedure.\n\n@param image 8-bit, single-channel, grayscale input image.\n@param circles Output vector of found circles. Each vector is encoded as a 3-element\nfloating-point vector \\f$(x, y, radius)\\f$ .\n@param method Detection method, see cv::HoughModes. Currently, the only implemented method is HOUGH_GRADIENT.\n@param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if\ndp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has\nhalf the width and height.\n@param minDist Minimum distance between the centers of the detected circles. If the parameter is\ntoo small, multiple neighbor circles may be falsely detected in addition to a true one. If it is\ntoo large, some circles may be missed.\n@param param1 First method-specific parameter. In case of HOUGH_GRADIENT , it is the higher\nthreshold of the two passed to the Canny edge detector (the lower one is half of it).\n@param param2 Second method-specific parameter. In case of HOUGH_GRADIENT , it is the\naccumulator threshold for the circle centers at the detection stage. The smaller it is, the more\nfalse circles may be detected. 
Circles, corresponding to the larger accumulator values, will be\nreturned first.\n@param minRadius Minimum circle radius.\n@param maxRadius Maximum circle radius.\n\n@sa fitEllipse, minEnclosingCircle\n */\nCV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles,\n                               int method, double dp, double minDist,\n                               double param1 = 100, double param2 = 100,\n                               int minRadius = 0, int maxRadius = 0 );\n\n//! @} imgproc_feature\n\n//! @addtogroup imgproc_filter\n//! @{\n\n/** @example morphology2.cpp\n  An example using the morphological operations\n*/\n\n/** @brief Erodes an image by using a specific structuring element.\n\nThe function erodes the source image using the specified structuring element that determines the\nshape of a pixel neighborhood over which the minimum is taken:\n\n\\f[\\texttt{dst} (x,y) =  \\min _{(x',y'):  \\, \\texttt{element} (x',y') \\ne0 } \\texttt{src} (x+x',y+y')\\f]\n\nThe function supports the in-place mode. Erosion can be applied several ( iterations ) times. In\ncase of multi-channel images, each channel is processed independently.\n\n@param src input image; the number of channels can be arbitrary, but the depth should be one of\nCV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n@param dst output image of the same size and type as src.\n@param kernel structuring element used for erosion; if `kernel=Mat()`, a `3 x 3` rectangular\nstructuring element is used. Kernel can be created using getStructuringElement.\n@param anchor position of the anchor within the element; default value (-1, -1) means that the\nanchor is at the element center.\n@param iterations number of times erosion is applied.\n@param borderType pixel extrapolation method, see cv::BorderTypes\n@param borderValue border value in case of a constant border\n@sa  dilate, morphologyEx, getStructuringElement\n */\nCV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel,\n                         Point anchor = Point(-1,-1), int iterations = 1,\n                         int borderType = BORDER_CONSTANT,\n                         const Scalar& borderValue = morphologyDefaultBorderValue() );\n\n/** @brief Dilates an image by using a specific structuring element.\n\nThe function dilates the source image using the specified structuring element that determines the\nshape of a pixel neighborhood over which the maximum is taken:\n\\f[\\texttt{dst} (x,y) =  \\max _{(x',y'):  \\, \\texttt{element} (x',y') \\ne0 } \\texttt{src} (x+x',y+y')\\f]\n\nThe function supports the in-place mode. Dilation can be applied several ( iterations ) times. In\ncase of multi-channel images, each channel is processed independently.\n\n@param src input image; the number of channels can be arbitrary, but the depth should be one of\nCV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n@param dst output image of the same size and type as src.\n@param kernel structuring element used for dilation; if `kernel=Mat()`, a `3 x 3` rectangular\nstructuring element is used. 
Kernel can be created using getStructuringElement.\n@param anchor position of the anchor within the element; default value (-1, -1) means that the\nanchor is at the element center.\n@param iterations number of times dilation is applied.\n@param borderType pixel extrapolation method, see cv::BorderTypes\n@param borderValue border value in case of a constant border\n@sa  erode, morphologyEx, getStructuringElement\n */\nCV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel,\n                          Point anchor = Point(-1,-1), int iterations = 1,\n                          int borderType = BORDER_CONSTANT,\n                          const Scalar& borderValue = morphologyDefaultBorderValue() );\n\n/** @brief Performs advanced morphological transformations.\n\nThe function morphologyEx can perform advanced morphological transformations using erosion and dilation as\nbasic operations.\n\nAny of the operations can be done in-place. In case of multi-channel images, each channel is\nprocessed independently.\n\n@param src Source image. The number of channels can be arbitrary. The depth should be one of\nCV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n@param dst Destination image of the same size and type as source image.\n@param op Type of a morphological operation, see cv::MorphTypes\n@param kernel Structuring element. It can be created using cv::getStructuringElement.\n@param anchor Anchor position within the kernel. Negative values mean that the anchor is at the\nkernel center.\n@param iterations Number of times erosion and dilation are applied.\n@param borderType Pixel extrapolation method, see cv::BorderTypes\n@param borderValue Border value in case of a constant border. The default value has a special\nmeaning.\n@sa  dilate, erode, getStructuringElement\n */\nCV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst,\n                                int op, InputArray kernel,\n                                Point anchor = Point(-1,-1), int iterations = 1,\n                                int borderType = BORDER_CONSTANT,\n                                const Scalar& borderValue = morphologyDefaultBorderValue() );\n\n//! @} imgproc_filter\n\n//! @addtogroup imgproc_transform\n//! @{\n\n/** @brief Resizes an image.\n\nThe function resize resizes the image src down to or up to the specified size. Note that the\ninitial dst type or size are not taken into account. Instead, the size and type are derived from\nthe `src`, `dsize`, `fx`, and `fy`. 
If you want to resize src so that it fits the pre-created dst,\nyou may call the function as follows:\n@code\n    // explicitly specify dsize=dst.size(); fx and fy will be computed from that.\n    resize(src, dst, dst.size(), 0, 0, interpolation);\n@endcode\nIf you want to decimate the image by factor of 2 in each direction, you can call the function this\nway:\n@code\n    // specify fx and fy and let the function compute the destination image size.\n    resize(src, dst, Size(), 0.5, 0.5, interpolation);\n@endcode\nTo shrink an image, it will generally look best with cv::INTER_AREA interpolation, whereas to\nenlarge an image, it will generally look best with cv::INTER_CUBIC (slow) or cv::INTER_LINEAR\n(faster but still looks OK).\n\n@param src input image.\n@param dst output image; it has the size dsize (when it is non-zero) or the size computed from\nsrc.size(), fx, and fy; the type of dst is the same as of src.\n@param dsize output image size; if it equals zero, it is computed as:\n \\f[\\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\\f]\n Either dsize or both fx and fy must be non-zero.\n@param fx scale factor along the horizontal axis; when it equals 0, it is computed as\n\\f[\\texttt{(double)dsize.width/src.cols}\\f]\n@param fy scale factor along the vertical axis; when it equals 0, it is computed as\n\\f[\\texttt{(double)dsize.height/src.rows}\\f]\n@param interpolation interpolation method, see cv::InterpolationFlags\n\n@sa  warpAffine, warpPerspective, remap\n */\nCV_EXPORTS_W void resize( InputArray src, OutputArray dst,\n                          Size dsize, double fx = 0, double fy = 0,\n                          int interpolation = INTER_LINEAR );\n\n/** @brief Applies an affine transformation to an image.\n\nThe function warpAffine transforms the source image using the specified matrix:\n\n\\f[\\texttt{dst} (x,y) =  \\texttt{src} ( \\texttt{M} _{11} x +  \\texttt{M} _{12} y +  \\texttt{M} _{13}, \\texttt{M} _{21} x +  \\texttt{M} _{22} y +  \\texttt{M} _{23})\\f]\n\nwhen the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted\nwith cv::invertAffineTransform and then put in the formula above instead of M. 
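For example, a rotation about the image center can be sketched as follows (the angle and scale are\nillustrative, and `src` is assumed to be loaded elsewhere):\n@code\n    // build a 2x3 rotation matrix and apply it, keeping the original image size\n    Mat M = getRotationMatrix2D(Point2f(src.cols/2.f, src.rows/2.f), 30, 1.0);\n    Mat dst;\n    warpAffine(src, dst, M, src.size());\n@endcode\n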
The function cannot\noperate in-place.\n\n@param src input image.\n@param dst output image that has the size dsize and the same type as src .\n@param M \\f$2\\times 3\\f$ transformation matrix.\n@param dsize size of the output image.\n@param flags combination of interpolation methods (see cv::InterpolationFlags) and the optional\nflag WARP_INVERSE_MAP that means that M is the inverse transformation (\n\\f$\\texttt{dst}\\rightarrow\\texttt{src}\\f$ ).\n@param borderMode pixel extrapolation method (see cv::BorderTypes); when\nborderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to\nthe \"outliers\" in the source image are not modified by the function.\n@param borderValue value used in case of a constant border; by default, it is 0.\n\n@sa  warpPerspective, resize, remap, getRectSubPix, transform\n */\nCV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst,\n                              InputArray M, Size dsize,\n                              int flags = INTER_LINEAR,\n                              int borderMode = BORDER_CONSTANT,\n                              const Scalar& borderValue = Scalar());\n\n/** @brief Applies a perspective transformation to an image.\n\nThe function warpPerspective transforms the source image using the specified matrix:\n\n\\f[\\texttt{dst} (x,y) =  \\texttt{src} \\left ( \\frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,\n     \\frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \\right )\\f]\n\nwhen the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert\nand then put in the formula above instead of M. The function cannot operate in-place.\n\n@param src input image.\n@param dst output image that has the size dsize and the same type as src .\n@param M \\f$3\\times 3\\f$ transformation matrix.\n@param dsize size of the output image.\n@param flags combination of interpolation methods (INTER_LINEAR or INTER_NEAREST) and the\noptional flag WARP_INVERSE_MAP, that sets M as the inverse transformation (\n\\f$\\texttt{dst}\\rightarrow\\texttt{src}\\f$ ).\n@param borderMode pixel extrapolation method (BORDER_CONSTANT or BORDER_REPLICATE).\n@param borderValue value used in case of a constant border; by default, it equals 0.\n\n@sa  warpAffine, resize, remap, getRectSubPix, perspectiveTransform\n */\nCV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst,\n                                   InputArray M, Size dsize,\n                                   int flags = INTER_LINEAR,\n                                   int borderMode = BORDER_CONSTANT,\n                                   const Scalar& borderValue = Scalar());\n\n/** @brief Applies a generic geometrical transformation to an image.\n\nThe function remap transforms the source image using the specified map:\n\n\\f[\\texttt{dst} (x,y) =  \\texttt{src} (map_x(x,y),map_y(x,y))\\f]\n\nwhere values of pixels with non-integer coordinates are computed using one of available\ninterpolation methods. \\f$map_x\\f$ and \\f$map_y\\f$ can be encoded as separate floating-point maps\nin \\f$map_1\\f$ and \\f$map_2\\f$ respectively, or interleaved floating-point maps of \\f$(x,y)\\f$ in\n\\f$map_1\\f$, or fixed-point maps created by using convertMaps. The reason you might want to\nconvert from floating to fixed-point representations of a map is that they can yield much faster\n(\\~2x) remapping operations. 
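A sketch of that conversion (assuming CV_32FC1 maps mapx and mapy, and an input image src, have\nbeen prepared elsewhere):\n@code\n    // convert the float maps once, then remap with the faster fixed-point maps\n    Mat map1, map2, dst;\n    convertMaps(mapx, mapy, map1, map2, CV_16SC2, false);\n    remap(src, dst, map1, map2, INTER_LINEAR);\n@endcode\n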
In the converted case, \\f$map_1\\f$ contains pairs (cvFloor(x),\ncvFloor(y)) and \\f$map_2\\f$ contains indices in a table of interpolation coefficients.\n\nThis function cannot operate in-place.\n\n@param src Source image.\n@param dst Destination image. It has the same size as map1 and the same type as src .\n@param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,\nCV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point\nrepresentation to fixed-point for speed.\n@param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map\nif map1 is (x,y) points), respectively.\n@param interpolation Interpolation method (see cv::InterpolationFlags). The method INTER_AREA is\nnot supported by this function.\n@param borderMode Pixel extrapolation method (see cv::BorderTypes). When\nborderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that\ncorrespond to the \"outliers\" in the source image are not modified by the function.\n@param borderValue Value used in case of a constant border. By default, it is 0.\n */\nCV_EXPORTS_W void remap( InputArray src, OutputArray dst,\n                         InputArray map1, InputArray map2,\n                         int interpolation, int borderMode = BORDER_CONSTANT,\n                         const Scalar& borderValue = Scalar());\n\n/** @brief Converts image transformation maps from one representation to another.\n\nThe function converts a pair of maps for remap from one representation to another. The following\noptions ( (map1.type(), map2.type()) \\f$\\rightarrow\\f$ (dstmap1.type(), dstmap2.type()) ) are\nsupported:\n\n- \\f$\\texttt{(CV\\_32FC1, CV\\_32FC1)} \\rightarrow \\texttt{(CV\\_16SC2, CV\\_16UC1)}\\f$. This is the\nmost frequently used conversion operation, in which the original floating-point maps (see remap )\nare converted to a more compact and much faster fixed-point representation. The first output array\ncontains the rounded coordinates and the second array (created only when nninterpolation=false )\ncontains indices in the interpolation tables.\n\n- \\f$\\texttt{(CV\\_32FC2)} \\rightarrow \\texttt{(CV\\_16SC2, CV\\_16UC1)}\\f$. The same as above but\nthe original maps are stored in one 2-channel matrix.\n\n- Reverse conversion. 
Obviously, the reconstructed floating-point maps will not be exactly the same\nas the originals.\n\n@param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .\n@param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),\nrespectively.\n@param dstmap1 The first output map that has the type dstmap1type and the same size as src .\n@param dstmap2 The second output map.\n@param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or\nCV_32FC2 .\n@param nninterpolation Flag indicating whether the fixed-point maps are used for the\nnearest-neighbor or for a more complex interpolation.\n\n@sa  remap, undistort, initUndistortRectifyMap\n */\nCV_EXPORTS_W void convertMaps( InputArray map1, InputArray map2,\n                               OutputArray dstmap1, OutputArray dstmap2,\n                               int dstmap1type, bool nninterpolation = false );\n\n/** @brief Calculates an affine matrix of 2D rotation.\n\nThe function calculates the following matrix:\n\n\\f[\\begin{bmatrix} \\alpha &  \\beta & (1- \\alpha )  \\cdot \\texttt{center.x} -  \\beta \\cdot \\texttt{center.y} \\\\ - \\beta &  \\alpha &  \\beta \\cdot \\texttt{center.x} + (1- \\alpha )  \\cdot \\texttt{center.y} \\end{bmatrix}\\f]\n\nwhere\n\n\\f[\\begin{array}{l} \\alpha =  \\texttt{scale} \\cdot \\cos \\texttt{angle} , \\\\ \\beta =  \\texttt{scale} \\cdot \\sin \\texttt{angle} \\end{array}\\f]\n\nThe transformation maps the rotation center to itself. If this is not the target, adjust the shift.\n\n@param center Center of the rotation in the source image.\n@param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the\ncoordinate origin is assumed to be the top-left corner).\n@param scale Isotropic scale factor.\n\n@sa  getAffineTransform, warpAffine, transform\n */\nCV_EXPORTS_W Mat getRotationMatrix2D( Point2f center, double angle, double scale );\n\n//! 
returns 3x3 perspective transformation for the corresponding 4 point pairs.\nCV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] );\n\n/** @brief Calculates an affine transform from three pairs of the corresponding points.\n\nThe function calculates the \\f$2 \\times 3\\f$ matrix of an affine transform so that:\n\n\\f[\\begin{bmatrix} x'_i \\\\ y'_i \\end{bmatrix} = \\texttt{map\\_matrix} \\cdot \\begin{bmatrix} x_i \\\\ y_i \\\\ 1 \\end{bmatrix}\\f]\n\nwhere\n\n\\f[dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2\\f]\n\n@param src Coordinates of triangle vertices in the source image.\n@param dst Coordinates of the corresponding triangle vertices in the destination image.\n\n@sa  warpAffine, transform\n */\nCV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] );\n\n/** @brief Inverts an affine transformation.\n\nThe function computes an inverse affine transformation represented by \\f$2 \\times 3\\f$ matrix M:\n\n\\f[\\begin{bmatrix} a_{11} & a_{12} & b_1  \\\\ a_{21} & a_{22} & b_2 \\end{bmatrix}\\f]\n\nThe result is also a \\f$2 \\times 3\\f$ matrix of the same type as M.\n\n@param M Original affine transformation.\n@param iM Output reverse affine transformation.\n */\nCV_EXPORTS_W void invertAffineTransform( InputArray M, OutputArray iM );\n\n/** @brief Calculates a perspective transform from four pairs of the corresponding points.\n\nThe function calculates the \\f$3 \\times 3\\f$ matrix of a perspective transform so that:\n\n\\f[\\begin{bmatrix} t_i x'_i \\\\ t_i y'_i \\\\ t_i \\end{bmatrix} = \\texttt{map\\_matrix} \\cdot \\begin{bmatrix} x_i \\\\ y_i \\\\ 1 \\end{bmatrix}\\f]\n\nwhere\n\n\\f[dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\\f]\n\n@param src Coordinates of quadrangle vertices in the source image.\n@param dst Coordinates of the corresponding quadrangle vertices in the destination image.\n\n@sa  findHomography, warpPerspective, perspectiveTransform\n */\nCV_EXPORTS_W Mat getPerspectiveTransform( InputArray src, InputArray dst );\n\nCV_EXPORTS_W Mat getAffineTransform( InputArray src, InputArray dst );\n\n/** @brief Retrieves a pixel rectangle from an image with sub-pixel accuracy.\n\nThe function getRectSubPix extracts pixels from src:\n\n\\f[dst(x, y) = src(x +  \\texttt{center.x} - ( \\texttt{dst.cols} -1)*0.5, y +  \\texttt{center.y} - ( \\texttt{dst.rows} -1)*0.5)\\f]\n\nwhere the values of the pixels at non-integer coordinates are retrieved using bilinear\ninterpolation. Every channel of multi-channel images is processed independently. While the center of\nthe rectangle must be inside the image, parts of the rectangle may be outside. In this case, the\nreplication border mode (see cv::BorderTypes) is used to extrapolate the pixel values outside of\nthe image.\n\n@param image Source image.\n@param patchSize Size of the extracted patch.\n@param center Floating point coordinates of the center of the extracted rectangle within the\nsource image. The center must be inside the image.\n@param patch Extracted patch that has the size patchSize and the same number of channels as src .\n@param patchType Depth of the extracted pixels. 
By default, they have the same depth as src .\n\n@sa  warpAffine, warpPerspective\n */\nCV_EXPORTS_W void getRectSubPix( InputArray image, Size patchSize,\n                                 Point2f center, OutputArray patch, int patchType = -1 );\n\n/** @example polar_transforms.cpp\nAn example using the cv::linearPolar and cv::logPolar operations\n*/\n\n/** @brief Remaps an image to log-polar space.\n\nThe function transforms the source image using the following transformation:\n\\f[dst( \\phi , \\rho ) = src(x,y)\\f]\nwhere\n\\f[\\rho = M  \\cdot \\log{\\sqrt{x^2 + y^2}} , \\phi = atan(y/x)\\f]\n\nThe function emulates the human \"foveal\" vision and can be used for fast scale and\nrotation-invariant template matching, for object tracking and so forth. The function cannot operate\nin-place.\n\n@param src Source image\n@param dst Destination image\n@param center The transformation center; where the output precision is maximal\n@param M Magnitude scale parameter.\n@param flags A combination of interpolation methods, see cv::InterpolationFlags\n */\nCV_EXPORTS_W void logPolar( InputArray src, OutputArray dst,\n                            Point2f center, double M, int flags );\n\n/** @brief Remaps an image to polar space.\n\nThe function transforms the source image using the following transformation:\n\\f[dst( \\phi , \\rho ) = src(x,y)\\f]\nwhere\n\\f[\\rho = (src.width/maxRadius)  \\cdot \\sqrt{x^2 + y^2} , \\phi = atan(y/x)\\f]\n\nThe function cannot operate in-place.\n\n@param src Source image\n@param dst Destination image\n@param center The transformation center.\n@param maxRadius Inverse magnitude scale parameter\n@param flags A combination of interpolation methods, see cv::InterpolationFlags\n */\nCV_EXPORTS_W void linearPolar( InputArray src, OutputArray dst,\n                               Point2f center, double maxRadius, int flags );\n\n//! @} imgproc_transform\n\n//! @addtogroup imgproc_misc\n//! @{\n\n/** @overload */\nCV_EXPORTS_W void integral( InputArray src, OutputArray sum, int sdepth = -1 );\n\n/** @overload */\nCV_EXPORTS_AS(integral2) void integral( InputArray src, OutputArray sum,\n                                        OutputArray sqsum, int sdepth = -1, int sqdepth = -1 );\n\n/** @brief Calculates the integral of an image.\n\nThe functions calculate one or more integral images for the source image as follows:\n\n\\f[\\texttt{sum} (X,Y) =  \\sum _{x<X,y<Y}  \\texttt{image} (x,y)\\f]\n\n\\f[\\texttt{sqsum} (X,Y) =  \\sum _{x<X,y<Y}  \\texttt{image} (x,y)^2\\f]\n\n\\f[\\texttt{tilted} (X,Y) =  \\sum _{y<Y,abs(x-X+1) \\leq Y-y-1}  \\texttt{image} (x,y)\\f]\n\nUsing these integral images, you can calculate sum, mean, and standard deviation over a specific\nup-right or rotated rectangular region of the image in a constant time, for example:\n\n\\f[\\sum _{x_1 \\leq x < x_2,  \\, y_1  \\leq y < y_2}  \\texttt{image} (x,y) =  \\texttt{sum} (x_2,y_2)- \\texttt{sum} (x_1,y_2)- \\texttt{sum} (x_2,y_1)+ \\texttt{sum} (x_1,y_1)\\f]\n\nThis makes it possible to do fast blurring or fast block correlation with a variable window size, for\nexample. In case of multi-channel images, sums for each channel are accumulated independently.\n\nAs a practical example, the next figure shows the calculation of the integral of a straight\nrectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . 
The selected pixels in the\noriginal image are shown, as well as the corresponding pixels in the integral images sum and tilted .\n\n![integral calculation example](pics/integral.png)\n\n@param src input image as \\f$W \\times H\\f$, 8-bit or floating-point (32f or 64f).\n@param sum integral image as \\f$(W+1)\\times (H+1)\\f$ , 32-bit integer or floating-point (32f or 64f).\n@param sqsum integral image for squared pixel values; it is \\f$(W+1)\\times (H+1)\\f$, double-precision\nfloating-point (64f) array.\n@param tilted integral for the image rotated by 45 degrees; it is \\f$(W+1)\\times (H+1)\\f$ array with\nthe same data type as sum.\n@param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or\nCV_64F.\n@param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.\n */\nCV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum,\n                                        OutputArray sqsum, OutputArray tilted,\n                                        int sdepth = -1, int sqdepth = -1 );\n\n//! @} imgproc_misc\n\n//! @addtogroup imgproc_motion\n//! @{\n\n/** @brief Adds an image to the accumulator.\n\nThe function adds src or some of its elements to dst :\n\n\\f[\\texttt{dst} (x,y)  \\leftarrow \\texttt{dst} (x,y) +  \\texttt{src} (x,y)  \\quad \\text{if} \\quad \\texttt{mask} (x,y)  \\ne 0\\f]\n\nThe function supports multi-channel images. Each channel is processed independently.\n\nThe functions accumulate\\* can be used, for example, to collect statistics of a scene background\nviewed by a still camera and for further foreground-background segmentation.\n\n@param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.\n@param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit\nfloating-point.\n@param mask Optional operation mask.\n\n@sa  accumulateSquare, accumulateProduct, accumulateWeighted\n */\nCV_EXPORTS_W void accumulate( InputArray src, InputOutputArray dst,\n                              InputArray mask = noArray() );\n\n/** @brief Adds the square of a source image to the accumulator.\n\nThe function adds the input image src or its selected region, raised to a power of 2, to the\naccumulator dst :\n\n\\f[\\texttt{dst} (x,y)  \\leftarrow \\texttt{dst} (x,y) +  \\texttt{src} (x,y)^2  \\quad \\text{if} \\quad \\texttt{mask} (x,y)  \\ne 0\\f]\n\nThe function supports multi-channel images. Each channel is processed independently.\n\n@param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.\n@param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit\nfloating-point.\n@param mask Optional operation mask.\n\n@sa  accumulate, accumulateProduct, accumulateWeighted\n */\nCV_EXPORTS_W void accumulateSquare( InputArray src, InputOutputArray dst,\n                                    InputArray mask = noArray() );\n\n/** @brief Adds the per-element product of two input images to the accumulator.\n\nThe function adds the product of two images or their selected regions to the accumulator dst :\n\n\\f[\\texttt{dst} (x,y)  \\leftarrow \\texttt{dst} (x,y) +  \\texttt{src1} (x,y)  \\cdot \\texttt{src2} (x,y)  \\quad \\text{if} \\quad \\texttt{mask} (x,y)  \\ne 0\\f]\n\nThe function supports multi-channel images. 
Each channel is processed independently.\n\n@param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.\n@param src2 Second input image of the same type and the same size as src1 .\n@param dst %Accumulator with the same number of channels as input images, 32-bit or 64-bit\nfloating-point.\n@param mask Optional operation mask.\n\n@sa  accumulate, accumulateSquare, accumulateWeighted\n */\nCV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,\n                                     InputOutputArray dst, InputArray mask=noArray() );\n\n/** @brief Updates a running average.\n\nThe function calculates the weighted sum of the input image src and the accumulator dst so that dst\nbecomes a running average of a frame sequence:\n\n\\f[\\texttt{dst} (x,y)  \\leftarrow (1- \\texttt{alpha} )  \\cdot \\texttt{dst} (x,y) +  \\texttt{alpha} \\cdot \\texttt{src} (x,y)  \\quad \\text{if} \\quad \\texttt{mask} (x,y)  \\ne 0\\f]\n\nThat is, alpha regulates the update speed (how fast the accumulator \"forgets\" about earlier images).\nThe function supports multi-channel images. Each channel is processed independently.\n\n@param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.\n@param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit\nfloating-point.\n@param alpha Weight of the input image.\n@param mask Optional operation mask.\n\n@sa  accumulate, accumulateSquare, accumulateProduct\n */\nCV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,\n                                      double alpha, InputArray mask = noArray() );\n\n/** @brief The function is used to detect translational shifts that occur between two images.\n\nThe operation takes advantage of the Fourier shift theorem for detecting the translational shift in\nthe frequency domain. It can be used for fast image registration as well as motion estimation. For\nmore information please see <http://en.wikipedia.org/wiki/Phase_correlation>\n\nCalculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed\nwith getOptimalDFTSize.\n\nThe function performs the following equations:\n- First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each\nimage to remove possible edge effects. This window is cached until the array size changes to speed\nup processing time.\n- Next it computes the forward DFTs of each source array:\n\\f[\\mathbf{G}_a = \\mathcal{F}\\{src_1\\}, \\; \\mathbf{G}_b = \\mathcal{F}\\{src_2\\}\\f]\nwhere \\f$\\mathcal{F}\\f$ is the forward DFT.\n- It then computes the cross-power spectrum of each frequency domain array:\n\\f[R = \\frac{ \\mathbf{G}_a \\mathbf{G}_b^*}{|\\mathbf{G}_a \\mathbf{G}_b^*|}\\f]\n- Next the cross-correlation is converted back into the time domain via the inverse DFT:\n\\f[r = \\mathcal{F}^{-1}\\{R\\}\\f]\n- Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to\nachieve sub-pixel accuracy.\n\\f[(\\Delta x, \\Delta y) = \\texttt{weightedCentroid} \\{\\arg \\max_{(x, y)}\\{r\\}\\}\\f]\n- If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5\ncentroid around the peak location. 
It is normalized to a maximum of 1 (meaning there is a single\npeak) and will be smaller when there are multiple peaks.\n\n@param src1 Source floating point array (CV_32FC1 or CV_64FC1)\n@param src2 Source floating point array (CV_32FC1 or CV_64FC1)\n@param window Floating point array with windowing coefficients to reduce edge effects (optional).\n@param response Signal power within the 5x5 centroid around the peak, between 0 and 1 (optional).\n@returns detected phase shift (sub-pixel) between the two arrays.\n\n@sa dft, getOptimalDFTSize, idft, mulSpectrums, createHanningWindow\n */\nCV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2,\n                                    InputArray window = noArray(), CV_OUT double* response = 0);\n\n/** @brief This function computes Hanning window coefficients in two dimensions.\n\nSee (http://en.wikipedia.org/wiki/Hann_function) and (http://en.wikipedia.org/wiki/Window_function)\nfor more information.\n\nAn example is shown below:\n@code\n    // create hanning window of size 100x100 and type CV_32F\n    Mat hann;\n    createHanningWindow(hann, Size(100, 100), CV_32F);\n@endcode\n@param dst Destination array to place Hann coefficients in\n@param winSize The window size specifications\n@param type Created array type\n */\nCV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type);\n\n//! @} imgproc_motion\n\n//! @addtogroup imgproc_misc\n//! @{\n\n/** @brief Applies a fixed-level threshold to each array element.\n\nThe function applies fixed-level thresholding to a single-channel array. The function is typically\nused to get a bi-level (binary) image out of a grayscale image ( cv::compare could be also used for\nthis purpose) or for removing noise, that is, for filtering out pixels with too small or too large\nvalues. There are several types of thresholding supported by the function. They are determined by the\ntype parameter.\n\nAlso, the special values cv::THRESH_OTSU or cv::THRESH_TRIANGLE may be combined with one of the\nabove values. In these cases, the function determines the optimal threshold value using Otsu's\nor the Triangle algorithm and uses it instead of the specified thresh . The function returns the\ncomputed threshold value. 
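For instance, a binary image with an automatically selected threshold can be obtained as follows\n(`gray` is assumed to be an 8-bit single-channel image):\n@code\n    // thresh (0 here) is ignored when THRESH_OTSU is set; the chosen value is returned\n    Mat bw;\n    double t = threshold(gray, bw, 0, 255, THRESH_BINARY | THRESH_OTSU);\n@endcode\n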
Currently, the Otsu and Triangle methods are implemented only for 8-bit\nimages.\n\n@param src input array (single-channel, 8-bit or 32-bit floating point).\n@param dst output array of the same size and type as src.\n@param thresh threshold value.\n@param maxval maximum value to use with the THRESH_BINARY and THRESH_BINARY_INV thresholding\ntypes.\n@param type thresholding type (see the cv::ThresholdTypes).\n\n@sa  adaptiveThreshold, findContours, compare, min, max\n */\nCV_EXPORTS_W double threshold( InputArray src, OutputArray dst,\n                               double thresh, double maxval, int type );\n\n\n/** @brief Applies an adaptive threshold to an array.\n\nThe function transforms a grayscale image to a binary image according to the formulae:\n-   **THRESH_BINARY**\n    \\f[dst(x,y) =  \\fork{\\texttt{maxValue}}{if \\(src(x,y) > T(x,y)\\)}{0}{otherwise}\\f]\n-   **THRESH_BINARY_INV**\n    \\f[dst(x,y) =  \\fork{0}{if \\(src(x,y) > T(x,y)\\)}{\\texttt{maxValue}}{otherwise}\\f]\nwhere \\f$T(x,y)\\f$ is a threshold calculated individually for each pixel (see adaptiveMethod parameter).\n\nThe function can process the image in-place.\n\n@param src Source 8-bit single-channel image.\n@param dst Destination image of the same size and the same type as src.\n@param maxValue Non-zero value assigned to the pixels for which the condition is satisfied.\n@param adaptiveMethod Adaptive thresholding algorithm to use, see cv::AdaptiveThresholdTypes\n@param thresholdType Thresholding type that must be either THRESH_BINARY or THRESH_BINARY_INV,\nsee cv::ThresholdTypes.\n@param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the\npixel: 3, 5, 7, and so on.\n@param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it\nis positive but may be zero or negative as well.\n\n@sa  threshold, blur, GaussianBlur\n */\nCV_EXPORTS_W void adaptiveThreshold( InputArray src, OutputArray dst,\n                                     double maxValue, int adaptiveMethod,\n                                     int thresholdType, int blockSize, double C );\n\n//! @} imgproc_misc\n\n//! @addtogroup imgproc_filter\n//! @{\n\n/** @brief Blurs an image and downsamples it.\n\nBy default, the size of the output image is computed as `Size((src.cols+1)/2, (src.rows+1)/2)`, but in\nany case, the following conditions should be satisfied:\n\n\\f[\\begin{array}{l} | \\texttt{dstsize.width} *2-src.cols| \\leq 2 \\\\ | \\texttt{dstsize.height} *2-src.rows| \\leq 2 \\end{array}\\f]\n\nThe function performs the downsampling step of the Gaussian pyramid construction. 
First, it\nconvolves the source image with the kernel:\n\n\\f[\\frac{1}{256} \\begin{bmatrix} 1 & 4 & 6 & 4 & 1  \\\\ 4 & 16 & 24 & 16 & 4  \\\\ 6 & 24 & 36 & 24 & 6  \\\\ 4 & 16 & 24 & 16 & 4  \\\\ 1 & 4 & 6 & 4 & 1 \\end{bmatrix}\\f]\n\nThen, it downsamples the image by rejecting even rows and columns.\n\n@param src input image.\n@param dst output image; it has the specified size and the same type as src.\n@param dstsize size of the output image.\n@param borderType Pixel extrapolation method, see cv::BorderTypes (BORDER_CONSTANT isn't supported)\n */\nCV_EXPORTS_W void pyrDown( InputArray src, OutputArray dst,\n                           const Size& dstsize = Size(), int borderType = BORDER_DEFAULT );\n\n/** @brief Upsamples an image and then blurs it.\n\nBy default, the size of the output image is computed as `Size(src.cols\\*2, src.rows\\*2)`, but in any\ncase, the following conditions should be satisfied:\n\n\\f[\\begin{array}{l} | \\texttt{dstsize.width} -src.cols*2| \\leq  ( \\texttt{dstsize.width}   \\mod  2)  \\\\ | \\texttt{dstsize.height} -src.rows*2| \\leq  ( \\texttt{dstsize.height}   \\mod  2) \\end{array}\\f]\n\nThe function performs the upsampling step of the Gaussian pyramid construction, though it can\nactually be used to construct the Laplacian pyramid. First, it upsamples the source image by\ninjecting even zero rows and columns and then convolves the result with the same kernel as in\npyrDown multiplied by 4.\n\n@param src input image.\n@param dst output image. It has the specified size and the same type as src .\n@param dstsize size of the output image.\n@param borderType Pixel extrapolation method, see cv::BorderTypes (only BORDER_DEFAULT is supported)\n */\nCV_EXPORTS_W void pyrUp( InputArray src, OutputArray dst,\n                         const Size& dstsize = Size(), int borderType = BORDER_DEFAULT );\n\n/** @brief Constructs the Gaussian pyramid for an image.\n\nThe function constructs a vector of images and builds the Gaussian pyramid by recursively applying\npyrDown to the previously built pyramid layers, starting from `dst[0]==src`.\n\n@param src Source image. Check pyrDown for the list of supported types.\n@param dst Destination vector of maxlevel+1 images of the same type as src. dst[0] will be the\nsame as src. dst[1] is the next pyramid layer, a smoothed and down-sized src, and so on.\n@param maxlevel 0-based index of the last (the smallest) pyramid layer. It must be non-negative.\n@param borderType Pixel extrapolation method, see cv::BorderTypes (BORDER_CONSTANT isn't supported)\n */\nCV_EXPORTS void buildPyramid( InputArray src, OutputArrayOfArrays dst,\n                              int maxlevel, int borderType = BORDER_DEFAULT );\n\n//! @} imgproc_filter\n\n//! @addtogroup imgproc_transform\n//! @{\n\n/** @brief Transforms an image to compensate for lens distortion.\n\nThe function transforms an image to compensate for radial and tangential lens distortion.\n\nThe function is simply a combination of cv::initUndistortRectifyMap (with unity R ) and cv::remap\n(with bilinear interpolation). See the former function for details of the transformation being\nperformed.\n\nThose pixels in the destination image, for which there are no corresponding pixels in the source\nimage, are filled with zeros (black color).\n\nA particular subset of the source image that will be visible in the corrected image can be regulated\nby newCameraMatrix. 
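A typical call is sketched below (cameraMatrix and distCoeffs are assumed to come from a prior\ncv::calibrateCamera run, and frame is an input image):\n@code\n    // correct lens distortion using previously estimated calibration data\n    Mat corrected;\n    undistort(frame, corrected, cameraMatrix, distCoeffs);\n@endcode\n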
You can use cv::getOptimalNewCameraMatrix to compute the appropriate\nnewCameraMatrix depending on your requirements.\n\nThe camera matrix and the distortion parameters can be determined using cv::calibrateCamera. If\nthe resolution of images is different from the resolution used at the calibration stage, \\f$f_x,\nf_y, c_x\\f$ and \\f$c_y\\f$ need to be scaled accordingly, while the distortion coefficients remain\nthe same.\n\n@param src Input (distorted) image.\n@param dst Output (corrected) image that has the same size and type as src .\n@param cameraMatrix Input camera matrix \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ .\n@param distCoeffs Input vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$\nof 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.\n@param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as\ncameraMatrix but you may additionally scale and shift the result by using a different matrix.\n */\nCV_EXPORTS_W void undistort( InputArray src, OutputArray dst,\n                             InputArray cameraMatrix,\n                             InputArray distCoeffs,\n                             InputArray newCameraMatrix = noArray() );\n\n/** @brief Computes the undistortion and rectification transformation map.\n\nThe function computes the joint undistortion and rectification transformation and represents the\nresult in the form of maps for remap. The undistorted image looks like the original, as if it is\ncaptured with a camera using the camera matrix = newCameraMatrix and zero distortion. In case of a\nmonocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by\ncv::getOptimalNewCameraMatrix for better control over scaling. In case of a stereo camera,\nnewCameraMatrix is normally set to P1 or P2 computed by cv::stereoRectify .\n\nAlso, this new camera is oriented differently in the coordinate space, according to R. That, for\nexample, helps to align two heads of a stereo camera so that the epipolar lines on both images\nbecome horizontal and have the same y-coordinate (in case of a horizontally aligned stereo camera).\n\nThe function actually builds the maps for the inverse mapping algorithm that is used by remap. That\nis, for each pixel \\f$(u, v)\\f$ in the destination (corrected and rectified) image, the function\ncomputes the corresponding coordinates in the source image (that is, in the original image from\ncamera). 
The following process is applied:\n\\f[\n\\begin{array}{l}\nx  \\leftarrow (u - {c'}_x)/{f'}_x  \\\\\ny  \\leftarrow (v - {c'}_y)/{f'}_y  \\\\\n{[X\\,Y\\,W]} ^T  \\leftarrow R^{-1}*[x \\, y \\, 1]^T  \\\\\nx'  \\leftarrow X/W  \\\\\ny'  \\leftarrow Y/W  \\\\\nr^2  \\leftarrow x'^2 + y'^2 \\\\\nx''  \\leftarrow x' \\frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\n+ 2p_1 x' y' + p_2(r^2 + 2 x'^2)  + s_1 r^2 + s_2 r^4\\\\\ny''  \\leftarrow y' \\frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\n+ p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\\\\ns\\vecthree{x'''}{y'''}{1} =\n\\vecthreethree{R_{33}(\\tau_x, \\tau_y)}{0}{-R_{13}(\\tau_x, \\tau_y)}\n{0}{R_{33}(\\tau_x, \\tau_y)}{-R_{23}(\\tau_x, \\tau_y)}\n{0}{0}{1} R(\\tau_x, \\tau_y) \\vecthree{x''}{y''}{1}\\\\\nmap_x(u,v)  \\leftarrow x''' f_x + c_x  \\\\\nmap_y(u,v)  \\leftarrow y''' f_y + c_y\n\\end{array}\n\\f]\nwhere \\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$\nare the distortion coefficients.\n\nIn case of a stereo camera, this function is called twice: once for each camera head, after\nstereoRectify, which in turn is called after cv::stereoCalibrate. But if the stereo camera\nwas not calibrated, it is still possible to compute the rectification transformations directly from\nthe fundamental matrix using cv::stereoRectifyUncalibrated. For each camera, the function computes\nhomography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D\nspace. R can be computed from H as\n\\f[\\texttt{R} = \\texttt{cameraMatrix} ^{-1} \\cdot \\texttt{H} \\cdot \\texttt{cameraMatrix}\\f]\nwhere cameraMatrix can be chosen arbitrarily.\n\n@param cameraMatrix Input camera matrix \\f$A=\\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ .\n@param distCoeffs Input vector of distortion coefficients\n\\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]])\\f$\nof 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.\n@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,\ncomputed by stereoRectify can be passed here. If the matrix is empty, the identity transformation\nis assumed. In cvInitUndistortMap, R is assumed to be an identity matrix.\n@param newCameraMatrix New camera matrix \\f$A'=\\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{0}{0}{1}\\f$.\n@param size Undistorted image size.\n@param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2, see cv::convertMaps\n@param map1 The first output map.\n@param map2 The second output map.\n */\nCV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs,\n                           InputArray R, InputArray newCameraMatrix,\n                           Size size, int m1type, OutputArray map1, OutputArray map2 );\n\n//! 
//! initializes maps for cv::remap() for wide-angle
CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs,
                                         Size imageSize, int destImageWidth,
                                         int m1type, OutputArray map1, OutputArray map2,
                                         int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0);

/** @brief Returns the default new camera matrix.

The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).

In the latter case, the new camera matrix will be:

\f[\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5  \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5  \\ 0 && 0 && 1 \end{bmatrix} ,\f]

where \f$f_x\f$ and \f$f_y\f$ are \f$(0,0)\f$ and \f$(1,1)\f$ elements of cameraMatrix, respectively.

By default, the undistortion functions in OpenCV (see initUndistortRectifyMap, undistort) do not
move the principal point. However, when you work with stereo, it is important to move the principal
points in both views to the same y-coordinate (which is required by most stereo correspondence
algorithms), and maybe to the same x-coordinate too. So, you can form the new camera matrix for
each view where the principal points are located at the center.

@param cameraMatrix Input camera matrix.
@param imgsize Camera view image size in pixels.
@param centerPrincipalPoint Location of the principal point in the new camera matrix. The
parameter indicates whether this location should be at the image center or not.
 */
CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize = Size(),
                                            bool centerPrincipalPoint = false );

/** @brief Computes the ideal point coordinates from the observed point coordinates.

The function is similar to cv::undistort and cv::initUndistortRectifyMap but it operates on a
sparse set of points instead of a raster image. Also, the function performs a reverse transformation
to cv::projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
planar object, it does, up to a translation vector, if the proper R is specified.
@code
    // (u,v) is the input point, (u', v') is the output point
    // camera_matrix=[fx 0 cx; 0 fy cy; 0 0 1]
    // P=[fx' 0 cx' tx; 0 fy' cy' ty; 0 0 1 tz]
    x" = (u - cx)/fx
    y" = (v - cy)/fy
    (x',y') = undistort(x",y",dist_coeffs)
    [X,Y,W]T = R*[x' y' 1]T
    x = X/W, y = Y/W
    // only performed if P=[fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is specified
    u' = x*fx' + cx'
    v' = y*fy' + cy',
@endcode
where cv::undistort is an approximate iterative algorithm that estimates the normalized original
point coordinates out of the normalized distorted point coordinates ("normalized" means that the
coordinates do not depend on the camera matrix).

The function can be used both for a stereo camera head and for a monocular camera (when R is empty).

@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).
@param dst Output ideal point coordinates after undistortion and reverse perspective
transformation.
If matrix P is identity or omitted, dst will contain normalized point coordinates.
@param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
@param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
cv::stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
@param P New camera matrix (3x3) or new projection matrix (3x4). P1 or P2 computed by
cv::stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
 */
CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
                                   InputArray cameraMatrix, InputArray distCoeffs,
                                   InputArray R = noArray(), InputArray P = noArray());

//! @} imgproc_transform

//! @addtogroup imgproc_hist
//! @{

/** @example demhist.cpp
An example for creating histograms of an image
*/

/** @brief Calculates a histogram of a set of arrays.

The functions calcHist calculate the histogram of one or more arrays. The elements of a tuple used
to increment a histogram bin are taken from the corresponding input arrays at the same location. The
sample below shows how to compute a 2D Hue-Saturation histogram for a color image:
@code
    #include <opencv2/imgproc.hpp>
    #include <opencv2/highgui.hpp>

    using namespace cv;

    int main( int argc, char** argv )
    {
        Mat src, hsv;
        if( argc != 2 || !(src=imread(argv[1], 1)).data )
            return -1;

        cvtColor(src, hsv, COLOR_BGR2HSV);

        // Quantize the hue to 30 levels
        // and the saturation to 32 levels
        int hbins = 30, sbins = 32;
        int histSize[] = {hbins, sbins};
        // hue varies from 0 to 179, see cvtColor
        float hranges[] = { 0, 180 };
        // saturation varies from 0 (black-gray-white) to
        // 255 (pure spectrum color)
        float sranges[] = { 0, 256 };
        const float* ranges[] = { hranges, sranges };
        MatND hist;
        // we compute the histogram from the 0-th and 1-st channels
        int channels[] = {0, 1};

        calcHist( &hsv, 1, channels, Mat(), // do not use mask
                 hist, 2, histSize, ranges,
                 true, // the histogram is uniform
                 false );
        double maxVal=0;
        minMaxLoc(hist, 0, &maxVal, 0, 0);

        int scale = 10;
        Mat histImg = Mat::zeros(sbins*scale, hbins*scale, CV_8UC3);

        for( int h = 0; h < hbins; h++ )
            for( int s = 0; s < sbins; s++ )
            {
                float binVal = hist.at<float>(h, s);
                int intensity = cvRound(binVal*255/maxVal);
                rectangle( histImg, Point(h*scale, s*scale),
                            Point( (h+1)*scale - 1, (s+1)*scale - 1),
                            Scalar::all(intensity),
                            CV_FILLED );
            }

        namedWindow( "Source", 1 );
        imshow( "Source", src );

        namedWindow( "H-S Histogram", 1 );
        imshow( "H-S Histogram", histImg );
        waitKey();
    }
@endcode

@param images Source arrays.
They all should have the same depth, CV_8U or CV_32F , and the same
size. Each of them can have an arbitrary number of channels.
@param nimages Number of source images.
@param channels List of the dims channels used to compute the histogram. The first array channels
are numbered from 0 to images[0].channels()-1 , the second array channels are counted from
images[0].channels() to images[0].channels() + images[1].channels()-1, and so on.
@param mask Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size
as images[i] . The non-zero mask elements mark the array elements counted in the histogram.
@param hist Output histogram, which is a dense or sparse dims -dimensional array.
@param dims Histogram dimensionality that must be positive and not greater than CV_MAX_DIMS
(equal to 32 in the current OpenCV version).
@param histSize Array of histogram sizes in each dimension.
@param ranges Array of the dims arrays of the histogram bin boundaries in each dimension. When the
histogram is uniform ( uniform =true), then for each dimension i it is enough to specify the lower
(inclusive) boundary \f$L_0\f$ of the 0-th histogram bin and the upper (exclusive) boundary
\f$U_{\texttt{histSize}[i]-1}\f$ for the last histogram bin histSize[i]-1 . That is, in case of a
uniform histogram each of ranges[i] is an array of 2 elements. When the histogram is not uniform (
uniform=false ), then each of ranges[i] contains histSize[i]+1 elements:
\f$L_0, U_0=L_1, U_1=L_2, ..., U_{\texttt{histSize[i]}-2}=L_{\texttt{histSize[i]}-1}, U_{\texttt{histSize[i]}-1}\f$
. The array elements that are not between \f$L_0\f$ and \f$U_{\texttt{histSize[i]}-1}\f$ are not
counted in the histogram.
@param uniform Flag indicating whether the histogram is uniform or not (see above).
@param accumulate Accumulation flag. If it is set, the histogram is not cleared in the beginning
when it is allocated. This feature enables you to compute a single histogram from several sets of
arrays, or to update the histogram in time.
*/
CV_EXPORTS void calcHist( const Mat* images, int nimages,
                          const int* channels, InputArray mask,
                          OutputArray hist, int dims, const int* histSize,
                          const float** ranges, bool uniform = true, bool accumulate = false );

/** @overload

This variant uses cv::SparseMat for output.
*/
CV_EXPORTS void calcHist( const Mat* images, int nimages,
                          const int* channels, InputArray mask,
                          SparseMat& hist, int dims,
                          const int* histSize, const float** ranges,
                          bool uniform = true, bool accumulate = false );

/** @overload */
CV_EXPORTS_W void calcHist( InputArrayOfArrays images,
                            const std::vector<int>& channels,
                            InputArray mask, OutputArray hist,
                            const std::vector<int>& histSize,
                            const std::vector<float>& ranges,
                            bool accumulate = false );

/** @brief Calculates the back projection of a histogram.

The functions calcBackProject calculate the back projection of the histogram. That is, similarly to
cv::calcHist , at each location (x, y) the function collects the values from the selected channels
in the input images and finds the corresponding histogram bin.
But instead of incrementing it, the
function reads the bin value, scales it by scale , and stores it in backProject(x,y) . In terms of
statistics, the function computes the probability of each element value with respect to the empirical
probability distribution represented by the histogram. See how, for example, you can find and track
a bright-colored object in a scene:

- Before tracking, show the object to the camera so that it covers almost the whole frame.
Calculate a hue histogram. The histogram may have strong maxima, corresponding to the dominant
colors in the object.

- When tracking, calculate a back projection of a hue plane of each input video frame using that
pre-computed histogram. Threshold the back projection to suppress weak colors. It may also make
sense to suppress pixels with insufficient color saturation and too dark or too bright pixels.

- Find connected components in the resulting picture and choose, for example, the largest
component.

This is an approximate algorithm of the CamShift color object tracker.

@param images Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same
size. Each of them can have an arbitrary number of channels.
@param nimages Number of source images.
@param channels The list of channels used to compute the back projection. The number of channels
must match the histogram dimensionality. The first array channels are numbered from 0 to
images[0].channels()-1 , the second array channels are counted from images[0].channels() to
images[0].channels() + images[1].channels()-1, and so on.
@param hist Input histogram that can be dense or sparse.
@param backProject Destination back projection array that is a single-channel array of the same
size and depth as images[0] .
@param ranges Array of arrays of the histogram bin boundaries in each dimension. See calcHist .
@param scale Optional scale factor for the output back projection.
@param uniform Flag indicating whether the histogram is uniform or not (see above).

@sa cv::calcHist, cv::compareHist
 */
CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
                                 const int* channels, InputArray hist,
                                 OutputArray backProject, const float** ranges,
                                 double scale = 1, bool uniform = true );

/** @overload */
CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
                                 const int* channels, const SparseMat& hist,
                                 OutputArray backProject, const float** ranges,
                                 double scale = 1, bool uniform = true );

/** @overload */
CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const std::vector<int>& channels,
                                   InputArray hist, OutputArray dst,
                                   const std::vector<float>& ranges,
                                   double scale );

/** @brief Compares two histograms.

The function compares two dense or two sparse histograms using the specified method.

The function returns \f$d(H_1, H_2)\f$ .

While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable
for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling
problems, the coordinates of non-zero histogram bins can slightly shift.
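For ordinary dense histograms, a minimal comparison sketch (hist1 and hist2 are assumed to have
been computed beforehand with cv::calcHist, using identical bin layouts) could look like:
@code
    double correlation = compareHist(hist1, hist2, HISTCMP_CORREL);
    double chiSquare   = compareHist(hist1, hist2, HISTCMP_CHISQR);
@endcode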
To compare such high-dimensional sparse histograms or more general sparse configurations of
weighted points, consider using the cv::EMD function.

@param H1 First compared histogram.
@param H2 Second compared histogram of the same size as H1 .
@param method Comparison method, see cv::HistCompMethods
 */
CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method );

/** @overload */
CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method );

/** @brief Equalizes the histogram of a grayscale image.

The function equalizes the histogram of the input image using the following algorithm:

- Calculate the histogram \f$H\f$ for src .
- Normalize the histogram so that the sum of histogram bins is 255.
- Compute the integral of the histogram:
\f[H'_i =  \sum _{0  \le j < i} H(j)\f]
- Transform the image using \f$H'\f$ as a look-up table: \f$\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\f$

The algorithm normalizes the brightness and increases the contrast of the image.

@param src Source 8-bit single-channel image.
@param dst Destination image of the same size and type as src .
 */
CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );

/** @brief Computes the "minimal work" distance between two weighted point configurations.

The function computes the earth mover's distance and/or a lower boundary of the distance between the
two weighted point configurations. One of the applications described in @cite RubnerSept98,
@cite Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
problem that is solved using some modification of a simplex algorithm, thus the complexity is
exponential in the worst case, though on average it is much faster. In the case of a real metric,
the lower boundary can be calculated even faster (using a linear-time algorithm) and it can be used
to determine roughly whether the two signatures are far enough apart that they cannot relate to the
same object.

@param signature1 First signature, a \f$\texttt{size1}\times \texttt{dims}+1\f$ floating-point matrix.
Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
a single column (weights only) if the user-defined cost matrix is used.
@param signature2 Second signature of the same format as signature1 , though the number of rows
may be different. The total weights may be different. In this case an extra "dummy" point is added
to either signature1 or signature2 .
@param distType Used metric. See cv::DistanceTypes.
@param cost User-defined \f$\texttt{size1}\times \texttt{size2}\f$ cost matrix. Also, if a cost matrix
is used, the lower boundary lowerBound cannot be calculated because it needs a metric function.
@param lowerBound Optional input/output parameter: lower boundary of a distance between the two
signatures that is a distance between mass centers. The lower boundary may not be calculated if
the user-defined cost matrix is used, the total weights of point configurations are not equal, or
if the signatures consist of weights only (the signature matrices have a single column). You
**must** initialize \*lowerBound . If the calculated distance between mass centers is greater than or
equal to \*lowerBound (it means that the signatures are far enough), the function does not
calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
return.
Thus, if you want to calculate both the distance between mass centers and the EMD, \*lowerBound
should be set to 0.
@param flow Resultant \f$\texttt{size1} \times \texttt{size2}\f$ flow matrix: \f$\texttt{flow}_{i,j}\f$ is
a flow from \f$i\f$ -th point of signature1 to \f$j\f$ -th point of signature2 .
 */
CV_EXPORTS float EMD( InputArray signature1, InputArray signature2,
                      int distType, InputArray cost=noArray(),
                      float* lowerBound = 0, OutputArray flow = noArray() );

//! @} imgproc_hist

/** @example watershed.cpp
An example using the watershed algorithm
 */

/** @brief Performs a marker-based image segmentation using the watershed algorithm.

The function implements one of the variants of watershed, a non-parametric marker-based segmentation
algorithm, described in @cite Meyer92 .

Before passing the image to the function, you have to roughly outline the desired regions in the
image markers with positive (\>0) indices. So, every region is represented as one or more connected
components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary
mask using findContours and drawContours (see the watershed.cpp demo). The markers are "seeds" of
the future image regions. All the other pixels in markers , whose relation to the outlined regions
is not known and should be defined by the algorithm, should be set to 0's. In the function output,
each pixel in markers is set to a value of the "seed" components or to -1 at boundaries between the
regions.

@note Any two neighboring connected components are not necessarily separated by a watershed boundary
(-1's pixels); for example, they can touch each other in the initial marker image passed to the
function.

@param image Input 8-bit 3-channel image.
@param markers Input/output 32-bit single-channel image (map) of markers. It should have the same
size as image .

@sa findContours

@ingroup imgproc_misc
 */
CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers );

//! @addtogroup imgproc_filter
//! @{

/** @brief Performs initial step of meanshift segmentation of an image.

The function implements the filtering stage of meanshift segmentation, that is, the output of the
function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
considered:

\f[(x,y): X- \texttt{sp} \le x  \le X+ \texttt{sp} , Y- \texttt{sp} \le y  \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)||   \le \texttt{sr}\f]

where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
(though the algorithm does not depend on the color space used, so any 3-component color space can
be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
(R',G',B') are found and they act as the neighborhood center on the next iteration:

\f[(X,Y) \leftarrow (X',Y'), \quad (R,G,B) \leftarrow (R',G',B').\f]

After the iterations are over, the color components of the initial pixel (that is, the pixel from where
the iterations started) are set to the final value (average color at the last iteration):

\f[I(X,Y) \leftarrow (R^*,G^*,B^*)\f]

When maxLevel \> 0, the Gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
run on the smallest layer first.
After that, the results are propagated to the larger layer and the
iterations are run again only on those pixels where the layer colors differ by more than sr from the
lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
results will actually be different from the ones obtained by running the meanshift procedure on the
whole original image (i.e. when maxLevel==0).

@param src The source 8-bit, 3-channel image.
@param dst The destination image of the same format and the same size as the source.
@param sp The spatial window radius.
@param sr The color window radius.
@param maxLevel Maximum level of the pyramid for the segmentation.
@param termcrit Termination criteria: when to stop meanshift iterations.
 */
CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst,
                                         double sp, double sr, int maxLevel = 1,
                                         TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) );

//! @}

//! @addtogroup imgproc_misc
//! @{

/** @example grabcut.cpp
An example using the GrabCut algorithm
 */

/** @brief Runs the GrabCut algorithm.

The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).

@param img Input 8-bit 3-channel image.
@param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
mode is set to GC_INIT_WITH_RECT. Its elements may have one of the cv::GrabCutClasses.
@param rect ROI containing a segmented object. The pixels outside of the ROI are marked as
"obvious background". The parameter is only used when mode==GC_INIT_WITH_RECT .
@param bgdModel Temporary array for the background model. Do not modify it while you are
processing the same image.
@param fgdModel Temporary array for the foreground model. Do not modify it while you are
processing the same image.
@param iterCount Number of iterations the algorithm should make before returning the result. Note
that the result can be refined with further calls with mode==GC_INIT_WITH_MASK or
mode==GC_EVAL .
@param mode Operation mode that could be one of the cv::GrabCutModes
 */
CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
                           InputOutputArray bgdModel, InputOutputArray fgdModel,
                           int iterCount, int mode = GC_EVAL );

/** @example distrans.cpp
An example using the distance transform
*/


/** @brief Calculates the distance to the closest zero pixel for each pixel of the source image.

The functions distanceTransform calculate the approximate or precise distance from every binary
image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.

When maskSize == DIST_MASK_PRECISE and distanceType == DIST_L2 , the function runs the
algorithm described in @cite Felzenszwalb04 . This algorithm is parallelized with the TBB library.

In other cases, the algorithm @cite Borgefors86 is used. This means that for a pixel the function
finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
diagonal, or knight's move (the last is available for a \f$5\times 5\f$ mask). The overall
distance is calculated as a sum of these basic distances.
Since the distance function should be
symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as `a`), all
the diagonal shifts must have the same cost (denoted as `b`), and all knight's moves must have the
same cost (denoted as `c`). For the cv::DIST_C and cv::DIST_L1 types, the distance is calculated
precisely, whereas for cv::DIST_L2 (Euclidean distance) the distance can be calculated only with a
relative error (a \f$5\times 5\f$ mask gives more accurate results). For `a`,`b`, and `c`, OpenCV
uses the values suggested in the original paper:
- DIST_L1: `a = 1, b = 2`
- DIST_L2:
    - `3 x 3`: `a=0.955, b=1.3693`
    - `5 x 5`: `a=1, b=1.4, c=2.1969`
- DIST_C: `a = 1, b = 1`

Typically, for a fast, coarse distance estimation, DIST_L2 with a \f$3\times 3\f$ mask is used. For a
more accurate distance estimation, DIST_L2 with a \f$5\times 5\f$ mask or the precise algorithm is used.
Note that both the precise and the approximate algorithms are linear on the number of pixels.

This variant of the function not only computes the minimum distance for each pixel \f$(x, y)\f$
but also identifies the nearest connected component consisting of zero pixels
(labelType==DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). The index of the
component/pixel is stored in `labels(x, y)`. When labelType==DIST_LABEL_CCOMP, the function
automatically finds connected components of zero pixels in the input image and marks them with
distinct labels. When labelType==DIST_LABEL_PIXEL, the function scans through the input image and
marks all the zero pixels with distinct labels.

In this mode, the complexity is still linear. That is, the function provides a very fast way to
compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
approximate distance transform algorithm, i.e. maskSize=DIST_MASK_PRECISE is not supported
yet.

@param src 8-bit, single-channel (binary) source image.
@param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
single-channel image of the same size as src.
@param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type
CV_32SC1 and the same size as src.
@param distanceType Type of distance, see cv::DistanceTypes
@param maskSize Size of the distance transform mask, see cv::DistanceTransformMasks.
DIST_MASK_PRECISE is not supported by this variant. In case of the DIST_L1 or DIST_C distance type,
the parameter is forced to 3 because a \f$3\times 3\f$ mask gives the same result as \f$5\times
5\f$ or any larger aperture.
@param labelType Type of the label array to build, see cv::DistanceTransformLabelTypes.
 */
CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst,
                                     OutputArray labels, int distanceType, int maskSize,
                                     int labelType = DIST_LABEL_CCOMP );

/** @overload
@param src 8-bit, single-channel (binary) source image.
@param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
single-channel image of the same size as src .
@param distanceType Type of distance, see cv::DistanceTypes
@param maskSize Size of the distance transform mask, see cv::DistanceTransformMasks.
In case of the\nDIST_L1 or DIST_C distance type, the parameter is forced to 3 because a \\f$3\\times 3\\f$ mask gives\nthe same result as \\f$5\\times 5\\f$ or any larger aperture.\n@param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for\nthe first variant of the function and distanceType == DIST_L1.\n*/\nCV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst,\n                                     int distanceType, int maskSize, int dstType=CV_32F);\n\n/** @example ffilldemo.cpp\n  An example using the FloodFill technique\n*/\n\n/** @overload\n\nvariant without `mask` parameter\n*/\nCV_EXPORTS int floodFill( InputOutputArray image,\n                          Point seedPoint, Scalar newVal, CV_OUT Rect* rect = 0,\n                          Scalar loDiff = Scalar(), Scalar upDiff = Scalar(),\n                          int flags = 4 );\n\n/** @brief Fills a connected component with the given color.\n\nThe functions floodFill fill a connected component starting from the seed point with the specified\ncolor. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The\npixel at \\f$(x,y)\\f$ is considered to belong to the repainted domain if:\n\n- in case of a grayscale image and floating range\n\\f[\\texttt{src} (x',y')- \\texttt{loDiff} \\leq \\texttt{src} (x,y)  \\leq \\texttt{src} (x',y')+ \\texttt{upDiff}\\f]\n\n\n- in case of a grayscale image and fixed range\n\\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)- \\texttt{loDiff} \\leq \\texttt{src} (x,y)  \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)+ \\texttt{upDiff}\\f]\n\n\n- in case of a color image and floating range\n\\f[\\texttt{src} (x',y')_r- \\texttt{loDiff} _r \\leq \\texttt{src} (x,y)_r \\leq \\texttt{src} (x',y')_r+ \\texttt{upDiff} _r,\\f]\n\\f[\\texttt{src} (x',y')_g- \\texttt{loDiff} _g \\leq \\texttt{src} (x,y)_g \\leq \\texttt{src} (x',y')_g+ \\texttt{upDiff} _g\\f]\nand\n\\f[\\texttt{src} (x',y')_b- \\texttt{loDiff} _b \\leq \\texttt{src} (x,y)_b \\leq \\texttt{src} (x',y')_b+ \\texttt{upDiff} _b\\f]\n\n\n- in case of a color image and fixed range\n\\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_r- \\texttt{loDiff} _r \\leq \\texttt{src} (x,y)_r \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_r+ \\texttt{upDiff} _r,\\f]\n\\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_g- \\texttt{loDiff} _g \\leq \\texttt{src} (x,y)_g \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_g+ \\texttt{upDiff} _g\\f]\nand\n\\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_b- \\texttt{loDiff} _b \\leq \\texttt{src} (x,y)_b \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_b+ \\texttt{upDiff} _b\\f]\n\n\nwhere \\f$src(x',y')\\f$ is the value of one of pixel neighbors that is already known to belong to the\ncomponent. That is, to be added to the connected component, a color/brightness of the pixel should\nbe close enough to:\n- Color/brightness of one of its neighbors that already belong to the connected component in case\nof a floating range.\n- Color/brightness of the seed point in case of a fixed range.\n\nUse these functions to either mark a connected component with the specified color in-place, or build\na mask and then extract the contour, or copy the region to another image, and so on.\n\n@param image Input/output 1- or 3-channel, 8-bit, or floating-point image. 
It is modified by the
function unless the FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
the details below.
@param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
taller than image. Since this is both an input and output parameter, you must take responsibility
for initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,
an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
mask corresponding to filled pixels in the image are set to 1 or to the value specified in flags
as described below. It is therefore possible to use the same mask in multiple calls to the function
to make sure the filled areas do not overlap.
@param seedPoint Starting point.
@param newVal New value of the repainted domain pixels.
@param loDiff Maximal lower brightness/color difference between the currently observed pixel and
one of its neighbors belonging to the component, or a seed pixel being added to the component.
@param upDiff Maximal upper brightness/color difference between the currently observed pixel and
one of its neighbors belonging to the component, or a seed pixel being added to the component.
@param rect Optional output parameter set by the function to the minimum bounding rectangle of the
repainted domain.
@param flags Operation flags. The first 8 bits contain a connectivity value. The default value of
4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
the mask (the default value is 1). For example, 4 | ( 255 \<\< 8 ) will consider 4 nearest
neighbors and fill the mask with a value of 255. The following additional options occupy higher
bits and therefore may be further combined with the connectivity and mask fill values using
bit-wise or (|), see cv::FloodFillFlags.

@note Since the mask is larger than the filled image, a pixel \f$(x, y)\f$ in image corresponds to the
pixel \f$(x+1, y+1)\f$ in the mask.

@sa findContours
 */
CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask,
                            Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0,
                            Scalar loDiff = Scalar(), Scalar upDiff = Scalar(),
                            int flags = 4 );

/** @brief Converts an image from one color space to another.

The function converts an input image from one color space to another. In case of a transformation
to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.

The conventional ranges for R, G, and B channel values are:
-   0 to 255 for CV_8U images
-   0 to 65535 for CV_16U images
-   0 to 1 for CV_32F images

In case of linear transformations, the range does not matter.
But in case of a non-linear
transformation, an input RGB image should be normalized to the proper value range to get the correct
results, for example, for RGB \f$\rightarrow\f$ L\*u\*v\* transformation. For example, if you have a
32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
have the 0..255 value range instead of 0..1 assumed by the function. So, before calling cvtColor ,
you first need to scale the image values down:
@code
    img *= 1./255;
    cvtColor(img, img, COLOR_BGR2Luv);
@endcode
If you use cvtColor with 8-bit images, some information will be lost in the conversion. For many
applications, this will not be noticeable but it is recommended to use 32-bit images in applications
that need the full range of colors or that convert an image before an operation and then convert
back.

If the conversion adds the alpha channel, its value will be set to the maximum of the corresponding
channel range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.

@param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
floating-point.
@param dst output image of the same size and depth as src.
@param code color space conversion code (see cv::ColorConversionCodes).
@param dstCn number of channels in the destination image; if the parameter is 0, the number of the
channels is derived automatically from src and code.

@see @ref imgproc_color_conversions
 */
CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn = 0 );

//! @} imgproc_misc

// main function for all demosaicing processes
CV_EXPORTS_W void demosaicing(InputArray _src, OutputArray _dst, int code, int dcn = 0);

//! @addtogroup imgproc_shape
//! @{

/** @brief Calculates all of the moments up to the third order of a polygon or rasterized shape.

The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
results are returned in the structure cv::Moments.

@param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (
\f$1 \times N\f$ or \f$N \times 1\f$ ) of 2D points (Point or Point2f ).
@param binaryImage If it is true, all non-zero image pixels are treated as 1's.
The parameter is
used for images only.
@returns moments.

@sa  contourArea, arcLength
 */
CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage = false );

/** @brief Calculates seven Hu invariants.

The function calculates seven Hu invariants (introduced in @cite Hu62; see also
<http://en.wikipedia.org/wiki/Image_moment>) defined as:

\f[\begin{array}{l} hu[0]= \eta _{20}+ \eta _{02} \\ hu[1]=( \eta _{20}- \eta _{02})^{2}+4 \eta _{11}^{2} \\ hu[2]=( \eta _{30}-3 \eta _{12})^{2}+ (3 \eta _{21}- \eta _{03})^{2} \\ hu[3]=( \eta _{30}+ \eta _{12})^{2}+ ( \eta _{21}+ \eta _{03})^{2} \\ hu[4]=( \eta _{30}-3 \eta _{12})( \eta _{30}+ \eta _{12})[( \eta _{30}+ \eta _{12})^{2}-3( \eta _{21}+ \eta _{03})^{2}]+(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ hu[5]=( \eta _{20}- \eta _{02})[( \eta _{30}+ \eta _{12})^{2}- ( \eta _{21}+ \eta _{03})^{2}]+4 \eta _{11}( \eta _{30}+ \eta _{12})( \eta _{21}+ \eta _{03}) \\ hu[6]=(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}]-( \eta _{30}-3 \eta _{12})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ \end{array}\f]

where \f$\eta_{ji}\f$ stands for \f$\texttt{Moments::nu}_{ji}\f$ .

These values are proven to be invariant to the image scale, rotation, and reflection, except for the
seventh one, whose sign changes under reflection. This invariance is proven under the assumption of
infinite image resolution. In case of raster images, the computed Hu invariants for the original and
transformed images are a bit different.

@param moments Input moments computed with moments .
@param hu Output Hu invariants.

@sa matchShapes
 */
CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] );

/** @overload */
CV_EXPORTS_W void HuMoments( const Moments& m, OutputArray hu );

//! @} imgproc_shape

//! @addtogroup imgproc_object
//! @{

//! type of the template matching operation
enum TemplateMatchModes {
    TM_SQDIFF        = 0, //!< \f[R(x,y)= \sum _{x',y'} (T(x',y')-I(x+x',y+y'))^2\f]
    TM_SQDIFF_NORMED = 1, //!< \f[R(x,y)= \frac{\sum_{x',y'} (T(x',y')-I(x+x',y+y'))^2}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}\f]
    TM_CCORR         = 2, //!< \f[R(x,y)= \sum _{x',y'} (T(x',y')  \cdot I(x+x',y+y'))\f]
    TM_CCORR_NORMED  = 3, //!< \f[R(x,y)= \frac{\sum_{x',y'} (T(x',y') \cdot I(x+x',y+y'))}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}\f]
    TM_CCOEFF        = 4, //!< \f[R(x,y)= \sum _{x',y'} (T'(x',y')  \cdot I'(x+x',y+y'))\f]
                          //!< where
                          //!< \f[\begin{array}{l} T'(x',y')=T(x',y') - 1/(w  \cdot h)  \cdot \sum _{x'',y''} T(x'',y'') \\ I'(x+x',y+y')=I(x+x',y+y') - 1/(w  \cdot h)  \cdot \sum _{x'',y''} I(x+x'',y+y'') \end{array}\f]
    TM_CCOEFF_NORMED = 5  //!< \f[R(x,y)= \frac{ \sum_{x',y'} (T'(x',y') \cdot I'(x+x',y+y')) }{ \sqrt{\sum_{x',y'}T'(x',y')^2 \cdot \sum_{x',y'} I'(x+x',y+y')^2} }\f]
};

/** @brief Compares a template against overlapped image regions.

The function slides through image , compares the overlapped patches of size \f$w \times h\f$ against
templ using the specified method and stores the comparison results in result.
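A minimal matching sketch (illustrative names; img and templ are assumed to be loaded beforehand):
@code
    Mat result;
    matchTemplate(img, templ, result, TM_CCOEFF_NORMED);
    double minVal, maxVal;
    Point minLoc, maxLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
    // maxLoc is the top-left corner of the best match for TM_CCOEFF_NORMED;
    // use minLoc instead when matching with TM_SQDIFF
@endcode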
The formulae for the available comparison methods are given with cv::TemplateMatchModes
( \f$I\f$ denotes image, \f$T\f$ template, \f$R\f$ result ). The summation
is done over the template and/or the image patch: \f$x' = 0...w-1, y' = 0...h-1\f$

After the function finishes the comparison, the best matches can be found as global minima (when
TM_SQDIFF was used) or maxima (when TM_CCORR or TM_CCOEFF was used) using the
minMaxLoc function. In case of a color image, template summation in the numerator and each sum in
the denominator is done over all of the channels and separate mean values are used for each channel.
That is, the function can take a color template and a color image. The result will still be a
single-channel image, which is easier to analyze.

@param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
@param templ Searched template. It must not be greater than the source image and must have the same
data type.
@param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
is \f$W \times H\f$ and templ is \f$w \times h\f$ , then result is \f$(W-w+1) \times (H-h+1)\f$ .
@param method Parameter specifying the comparison method, see cv::TemplateMatchModes
@param mask Mask of searched template. It must have the same data type and size as templ. It is
not set by default.
 */
CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ,
                                 OutputArray result, int method, InputArray mask = noArray() );

//! @}

//! @addtogroup imgproc_shape
//! @{

/** @brief computes the connected components labeled image of a boolean image

The image is labeled with 4- or 8-way connectivity. The function returns N, the total number of
labels [0, N-1], where 0 represents the background label. ltype specifies the output label image
type, an important consideration based on the total number of labels or alternatively the total
number of pixels in the source image.

@param image the 8-bit single-channel image to be labeled
@param labels destination labeled image
@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
@param ltype output image label type. Currently CV_32S and CV_16U are supported.
 */
CV_EXPORTS_W int connectedComponents(InputArray image, OutputArray labels,
                                     int connectivity = 8, int ltype = CV_32S);

/** @overload
@param image the 8-bit single-channel image to be labeled
@param labels destination labeled image
@param stats statistics output for each label, including the background label, see below for
available statistics. Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
cv::ConnectedComponentsTypes. The data type is CV_32S.
@param centroids centroid output for each label, including the background label. Centroids are
accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type is CV_64F.
@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
@param ltype output image label type. Currently CV_32S and CV_16U are supported.
*/
CV_EXPORTS_W int connectedComponentsWithStats(InputArray image, OutputArray labels,
                                              OutputArray stats, OutputArray centroids,
                                              int connectivity = 8, int ltype = CV_32S);


/** @brief Finds contours in a binary image.

The function retrieves contours from the binary image using the algorithm @cite Suzuki85 .
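A minimal sketch of a typical call (names are illustrative; the input is assumed to have been
binarized beforehand, e.g. with cv::threshold):
@code
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(binaryImage, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
@endcode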
The contours
are a useful tool for shape analysis and object detection and recognition. See squares.c in the
OpenCV sample directory.

@note The source image is modified by this function. Also, the function does not take into account
the 1-pixel border of the image (it's filled with 0's and used for neighbor analysis in the algorithm),
therefore the contours touching the image border will be clipped.

@param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
pixels remain 0's, so the image is treated as binary . You can use compare , inRange , threshold ,
adaptiveThreshold , Canny , and others to create a binary image out of a grayscale or color one.
The function modifies the image while extracting the contours. If mode equals RETR_CCOMP
or RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
@param contours Detected contours. Each contour is stored as a vector of points.
@param hierarchy Optional output vector, containing information about the image topology. It has
as many elements as the number of contours. For each i-th contour contours[i] , the elements
hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
in contours of the next and previous contours at the same hierarchical level, the first child
contour and the parent contour, respectively. If for the contour i there are no next, previous,
parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
@param mode Contour retrieval mode, see cv::RetrievalModes
@param method Contour approximation method, see cv::ContourApproximationModes
@param offset Optional offset by which every contour point is shifted. This is useful if the
contours are extracted from the image ROI and then they should be analyzed in the whole image
context.
 */
CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours,
                              OutputArray hierarchy, int mode,
                              int method, Point offset = Point());

/** @overload */
CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours,
                              int mode, int method, Point offset = Point());

/** @brief Approximates a polygonal curve(s) with the specified precision.

The functions approxPolyDP approximate a curve or a polygon with another curve/polygon with fewer
vertices so that the distance between them is less than or equal to the specified precision. It uses the
Douglas-Peucker algorithm <http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm>

@param curve Input vector of 2D points stored in std::vector or Mat
@param approxCurve Result of the approximation. The type should match the type of the input curve.
@param epsilon Parameter specifying the approximation accuracy. This is the maximum distance
between the original curve and its approximation.
@param closed If true, the approximated curve is closed (its first and last vertices are
connected).
Otherwise, it is not closed.
 */
CV_EXPORTS_W void approxPolyDP( InputArray curve,
                                OutputArray approxCurve,
                                double epsilon, bool closed );

/** @brief Calculates a contour perimeter or a curve length.

The function computes a curve length or a closed contour perimeter.

@param curve Input vector of 2D points, stored in std::vector or Mat.
@param closed Flag indicating whether the curve is closed or not.
 */
CV_EXPORTS_W double arcLength( InputArray curve, bool closed );

/** @brief Calculates the up-right bounding rectangle of a point set.

The function calculates and returns the minimal up-right bounding rectangle for the specified point set.

@param points Input 2D point set, stored in std::vector or Mat.
 */
CV_EXPORTS_W Rect boundingRect( InputArray points );

/** @brief Calculates a contour area.

The function computes a contour area. Similarly to moments , the area is computed using the Green
formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
drawContours or fillPoly , can be different. Also, the function will most certainly give wrong
results for contours with self-intersections.

Example:
@code
    vector<Point> contour;
    contour.push_back(Point(0, 0));
    contour.push_back(Point(10, 0));
    contour.push_back(Point(10, 10));
    contour.push_back(Point(5, 4));

    double area0 = contourArea(contour);
    vector<Point> approx;
    approxPolyDP(contour, approx, 5, true);
    double area1 = contourArea(approx);

    cout << "area0 =" << area0 << endl <<
            "area1 =" << area1 << endl <<
            "approx poly vertices" << approx.size() << endl;
@endcode
@param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
@param oriented Oriented area flag. If it is true, the function returns a signed area value,
depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can
determine the orientation of a contour by taking the sign of an area. By default, the parameter is
false, which means that the absolute value is returned.
 */
CV_EXPORTS_W double contourArea( InputArray contour, bool oriented = false );

/** @brief Finds a rotated rectangle of the minimum area enclosing the input 2D point set.

The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a
specified point set. See the OpenCV sample minarea.cpp . The developer should keep in mind that the
returned rotatedRect can contain negative indices when data is close to the containing Mat element
boundary.

@param points Input vector of 2D points, stored in std::vector\<\> or Mat
 */
CV_EXPORTS_W RotatedRect minAreaRect( InputArray points );

/** @brief Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.

The function finds the four vertices of a rotated rectangle. This function is useful to draw the
rectangle. In C++, instead of using this function, you can directly use the box.points() method. Please
visit the [tutorial on bounding
rectangle](http://docs.opencv.org/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.html#bounding-rects-circles)
for more information.

@param box The input rotated rectangle.
It may be the output of cv::minAreaRect.
@param points The output array of four vertices of the rectangle.
 */
CV_EXPORTS_W void boxPoints(RotatedRect box, OutputArray points);

/** @brief Finds a circle of the minimum area enclosing a 2D point set.

The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm. See
the OpenCV sample minarea.cpp .

@param points Input vector of 2D points, stored in std::vector\<\> or Mat
@param center Output center of the circle.
@param radius Output radius of the circle.
 */
CV_EXPORTS_W void minEnclosingCircle( InputArray points,
                                      CV_OUT Point2f& center, CV_OUT float& radius );

/** @example minarea.cpp
  */

/** @brief Finds a triangle of minimum area enclosing a 2D point set and returns its area.

The function finds a triangle of minimum area enclosing the given set of 2D points and returns its
area. The output for a given 2D point set is shown in the image below. 2D points are depicted in
*red* and the enclosing triangle in *yellow*.

![Sample output of the minimum enclosing triangle function](pics/minenclosingtriangle.png)

The implementation of the algorithm is based on O'Rourke's @cite ORourke86 and Klee and Laskowski's
@cite KleeLaskowski85 papers. O'Rourke provides a \f$\theta(n)\f$ algorithm for finding the minimal
enclosing triangle of a 2D convex polygon with n vertices. Since the minEnclosingTriangle function
takes a 2D point set as input, an additional preprocessing step of computing the convex hull of the
2D point set is required. The complexity of the convexHull function is \f$O(n log(n))\f$, which is higher
than \f$\theta(n)\f$. Thus the overall complexity of the function is \f$O(n log(n))\f$.

@param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector\<\> or Mat
@param triangle Output vector of three 2D points defining the vertices of the triangle. The depth
of the OutputArray must be CV_32F.
 */
CV_EXPORTS_W double minEnclosingTriangle( InputArray points, CV_OUT OutputArray triangle );

/** @brief Compares two shapes.

The function compares two shapes. All three implemented methods use the Hu invariants (see cv::HuMoments).

@param contour1 First contour or grayscale image.
@param contour2 Second contour or grayscale image.
@param method Comparison method, see ::ShapeMatchModes
@param parameter Method-specific parameter (not supported now).
 */
CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2,
                                 int method, double parameter );

/** @example convexhull.cpp
An example using the convexHull functionality
*/

/** @brief Finds the convex hull of a point set.

The functions find the convex hull of a 2D point set using Sklansky's algorithm @cite Sklansky82
that has *O(N log N)* complexity in the current implementation. See the OpenCV sample convexhull.cpp
that demonstrates the usage of different function variants.

@param points Input 2D point set, stored in std::vector or Mat.
@param hull Output convex hull. It is either an integer vector of indices or a vector of points. In
the first case, the hull elements are 0-based indices of the convex hull points in the original
array (since the set of convex hull points is a subset of the original point set). In the second
case, hull elements are the convex hull points themselves.
@param clockwise Orientation flag.
If it is true, the output convex hull is oriented clockwise.
Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
to the right, and its Y axis pointing upwards.
@param returnPoints Operation flag. In case of a matrix, when the flag is true, the function
returns convex hull points. Otherwise, it returns indices of the convex hull points. When the
output array is std::vector, the flag is ignored, and the output depends on the type of the
vector: std::vector\<int\> implies returnPoints=false, std::vector\<Point\> implies
returnPoints=true.
 */
CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull,
                              bool clockwise = false, bool returnPoints = true );

/** @brief Finds the convexity defects of a contour.

The figure below displays convexity defects of a hand contour:

![image](pics/defects.png)

@param contour Input contour.
@param convexhull Convex hull obtained using convexHull that should contain indices of the contour
points that make the hull.
@param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java
interface each convexity defect is represented as a 4-element integer vector (a.k.a. cv::Vec4i):
(start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices
in the original contour of the convexity defect beginning, end and the farthest point, and
fixpt_depth is a fixed-point approximation (with 8 fractional bits) of the distance between the
farthest contour point and the hull. That is, the floating-point value of the depth is
fixpt_depth/256.0.
 */
CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects );

/** @brief Tests a contour convexity.

The function tests whether the input contour is convex or not. The contour must be simple, that is,
without self-intersections. Otherwise, the function output is undefined.

@param contour Input vector of 2D points, stored in std::vector\<\> or Mat
 */
CV_EXPORTS_W bool isContourConvex( InputArray contour );

//! finds intersection of two convex polygons
CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2,
                                          OutputArray _p12, bool handleNested = true );

/** @example fitellipse.cpp
  An example using the fitEllipse technique
*/

/** @brief Fits an ellipse around a set of 2D points.

The function calculates the ellipse that best fits (in a least-squares sense) a set of 2D points.
It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm described by @cite Fitzgibbon95
is used.
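A minimal usage sketch (contour is assumed to hold at least five points, which the fitting
requires; img and the drawing step are illustrative):
@code
    RotatedRect box = fitEllipse(contour);
    ellipse(img, box, Scalar(0, 255, 0), 2);
@endcode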
The developer should keep in mind that it is possible that the returned
ellipse/rotatedRect data contains negative indices, due to the data points being close to the
border of the containing Mat element.

@param points Input 2D point set, stored in std::vector\<\> or Mat
 */
CV_EXPORTS_W RotatedRect fitEllipse( InputArray points );

/** @brief Fits a line to a 2D or 3D point set.

The function fitLine fits a line to a 2D or 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
\f$r_i\f$ is the distance between the \f$i^{th}\f$ point and the line, and \f$\rho(r)\f$ is a distance function, one
of the following:
-  DIST_L2
\f[\rho (r) = r^2/2  \quad \text{(the simplest and the fastest least-squares method)}\f]
- DIST_L1
\f[\rho (r) = r\f]
- DIST_L12
\f[\rho (r) = 2  \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
- DIST_FAIR
\f[\rho \left (r \right ) = C^2  \cdot \left (  \frac{r}{C} -  \log{\left(1 + \frac{r}{C}\right)} \right )  \quad \text{where} \quad C=1.3998\f]
- DIST_WELSCH
\f[\rho \left (r \right ) =  \frac{C^2}{2} \cdot \left ( 1 -  \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right )  \quad \text{where} \quad C=2.9846\f]
- DIST_HUBER
\f[\rho (r) =  \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]

The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .

@param points Input vector of 2D or 3D points, stored in std::vector\<\> or Mat.
@param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements
(like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and
(x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like
Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line
and (x0, y0, z0) is a point on the line.
@param distType Distance used by the M-estimator, see cv::DistanceTypes
@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
is chosen.
@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line).
@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
 */
CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType,
                           double param, double reps, double aeps );

/** @brief Performs a point-in-contour test.

The function determines whether the point is inside a contour, outside, or lies on an edge (or
coincides with a vertex). It returns a positive (inside), negative (outside), or zero (on an edge)
value, respectively. When measureDist=false , the return value is +1, -1, and 0, respectively.
Otherwise, the return value is a signed distance between the point and the nearest contour edge.

See below a sample output of the function where each image pixel is tested against the contour:

![sample output](pics/pointpolygon.png)

@param contour Input contour.
@param pt Point tested against the contour.
@param measureDist If true, the function estimates the signed distance from the point to the
nearest contour edge.
Otherwise, the function only checks if the point is inside a contour or not.\n */\nCV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist );\n\n/** @brief Finds out if there is any intersection between two rotated rectangles.\n\nIf there is, then the vertices of the intersecting region are returned as well.\n\nBelow are some examples of intersection configurations. The hatched pattern indicates the\nintersecting region and the red vertices are returned by the function.\n\n![intersection examples](pics/intersection.png)\n\n@param rect1 First rectangle\n@param rect2 Second rectangle\n@param intersectingRegion The output array of the vertices of the intersecting region. It returns\nat most 8 vertices. Stored as std::vector\<cv::Point2f\> or cv::Mat as Mx1 of type CV_32FC2.\n@returns One of cv::RectanglesIntersectTypes\n */\nCV_EXPORTS_W int rotatedRectangleIntersection( const RotatedRect& rect1, const RotatedRect& rect2, OutputArray intersectingRegion  );\n\n//! @} imgproc_shape\n\nCV_EXPORTS_W Ptr<CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));\n\n//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.\n//! Detects position only without translation and rotation\nCV_EXPORTS Ptr<GeneralizedHoughBallard> createGeneralizedHoughBallard();\n\n//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.\n//! Detects position, translation and rotation\nCV_EXPORTS Ptr<GeneralizedHoughGuil> createGeneralizedHoughGuil();\n\n//! Performs linear blending of two images\nCV_EXPORTS void blendLinear(InputArray src1, InputArray src2, InputArray weights1, InputArray weights2, OutputArray dst);\n\n//! @addtogroup imgproc_colormap\n//! @{\n\n//! GNU Octave/MATLAB equivalent colormaps\nenum ColormapTypes\n{\n    COLORMAP_AUTUMN = 0, //!< ![autumn](pics/colormaps/colorscale_autumn.jpg)\n    COLORMAP_BONE = 1, //!< ![bone](pics/colormaps/colorscale_bone.jpg)\n    COLORMAP_JET = 2, //!< ![jet](pics/colormaps/colorscale_jet.jpg)\n    COLORMAP_WINTER = 3, //!< ![winter](pics/colormaps/colorscale_winter.jpg)\n    COLORMAP_RAINBOW = 4, //!< ![rainbow](pics/colormaps/colorscale_rainbow.jpg)\n    COLORMAP_OCEAN = 5, //!< ![ocean](pics/colormaps/colorscale_ocean.jpg)\n    COLORMAP_SUMMER = 6, //!< ![summer](pics/colormaps/colorscale_summer.jpg)\n    COLORMAP_SPRING = 7, //!< ![spring](pics/colormaps/colorscale_spring.jpg)\n    COLORMAP_COOL = 8, //!< ![cool](pics/colormaps/colorscale_cool.jpg)\n    COLORMAP_HSV = 9, //!< ![HSV](pics/colormaps/colorscale_hsv.jpg)\n    COLORMAP_PINK = 10, //!< ![pink](pics/colormaps/colorscale_pink.jpg)\n    COLORMAP_HOT = 11, //!< ![hot](pics/colormaps/colorscale_hot.jpg)\n    COLORMAP_PARULA = 12 //!< ![parula](pics/colormaps/colorscale_parula.jpg)\n};\n\n/** @brief Applies a GNU Octave/MATLAB equivalent colormap on a given image.\n\n@param src The source image, grayscale or colored does not matter.\n@param dst The result is the colormapped source image. Note: Mat::create is called on dst.\n@param colormap The colormap to apply, see cv::ColormapTypes\n */\nCV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);\n\n//! @} imgproc_colormap\n\n//! @addtogroup imgproc_draw\n//! @{\n\n/** @brief Draws a line segment connecting two points.\n\nThe function line draws the line segment between pt1 and pt2 points in the image. 
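A short usage sketch (the canvas size, coordinates and color below are arbitrary):\n@code\n    cv::Mat canvas = cv::Mat::zeros(200, 200, CV_8UC3);\n    cv::line(canvas, cv::Point(10, 10), cv::Point(190, 150),\n             cv::Scalar(0, 255, 0), 2, cv::LINE_AA);\n@endcode\n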
The line is\nclipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected\nor 4-connected Bresenham algorithm is used. Thick lines are drawn with rounded endings. Antialiased\nlines are drawn using Gaussian filtering.\n\n@param img Image.\n@param pt1 First point of the line segment.\n@param pt2 Second point of the line segment.\n@param color Line color.\n@param thickness Line thickness.\n@param lineType Type of the line, see cv::LineTypes.\n@param shift Number of fractional bits in the point coordinates.\n */\nCV_EXPORTS_W void line(InputOutputArray img, Point pt1, Point pt2, const Scalar& color,\n                     int thickness = 1, int lineType = LINE_8, int shift = 0);\n\n/** @brief Draws an arrow segment pointing from the first point to the second one.\n\nThe function arrowedLine draws an arrow between pt1 and pt2 points in the image. See also cv::line.\n\n@param img Image.\n@param pt1 The point the arrow starts from.\n@param pt2 The point the arrow points to.\n@param color Line color.\n@param thickness Line thickness.\n@param line_type Type of the line, see cv::LineTypes\n@param shift Number of fractional bits in the point coordinates.\n@param tipLength The length of the arrow tip in relation to the arrow length\n */\nCV_EXPORTS_W void arrowedLine(InputOutputArray img, Point pt1, Point pt2, const Scalar& color,\n                     int thickness=1, int line_type=8, int shift=0, double tipLength=0.1);\n\n/** @brief Draws a simple, thick, or filled up-right rectangle.\n\nThe function rectangle draws a rectangle outline or a filled rectangle whose two opposite corners\nare pt1 and pt2.\n\n@param img Image.\n@param pt1 Vertex of the rectangle.\n@param pt2 Vertex of the rectangle opposite to pt1 .\n@param color Rectangle color or brightness (grayscale image).\n@param thickness Thickness of lines that make up the rectangle. Negative values, like CV_FILLED ,\nmean that the function has to draw a filled rectangle.\n@param lineType Type of the line. See the line description.\n@param shift Number of fractional bits in the point coordinates.\n */\nCV_EXPORTS_W void rectangle(InputOutputArray img, Point pt1, Point pt2,\n                          const Scalar& color, int thickness = 1,\n                          int lineType = LINE_8, int shift = 0);\n\n/** @overload\n\nuse the `rec` parameter as an alternative specification of the drawn rectangle: `rec.tl() and\nrec.br()-Point(1,1)` are opposite corners\n*/\nCV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,\n                          const Scalar& color, int thickness = 1,\n                          int lineType = LINE_8, int shift = 0);\n\n/** @brief Draws a circle.\n\nThe function circle draws a simple or filled circle with a given center and radius.\n@param img Image where the circle is drawn.\n@param center Center of the circle.\n@param radius Radius of the circle.\n@param color Circle color.\n@param thickness Thickness of the circle outline, if positive. Negative thickness means that a\nfilled circle is to be drawn.\n@param lineType Type of the circle boundary. 
See the line description.\n@param shift Number of fractional bits in the coordinates of the center and in the radius value.\n */\nCV_EXPORTS_W void circle(InputOutputArray img, Point center, int radius,\n                       const Scalar& color, int thickness = 1,\n                       int lineType = LINE_8, int shift = 0);\n\n/** @brief Draws a simple or thick elliptic arc or fills an ellipse sector.\n\nThe ellipse functions with fewer parameters draw an ellipse outline, a filled ellipse, an elliptic\narc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc\nboundary. If you need more control of the ellipse rendering, you can retrieve the curve using\nellipse2Poly and then render it with polylines or fill it with fillPoly . If you use the first\nvariant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and\nendAngle=360 . The figure below explains the meaning of the parameters.\n\n![Parameters of Elliptic Arc](pics/ellipse.png)\n\n@param img Image.\n@param center Center of the ellipse.\n@param axes Half of the size of the ellipse main axes.\n@param angle Ellipse rotation angle in degrees.\n@param startAngle Starting angle of the elliptic arc in degrees.\n@param endAngle Ending angle of the elliptic arc in degrees.\n@param color Ellipse color.\n@param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that\na filled ellipse sector is to be drawn.\n@param lineType Type of the ellipse boundary. See the line description.\n@param shift Number of fractional bits in the coordinates of the center and values of axes.\n */\nCV_EXPORTS_W void ellipse(InputOutputArray img, Point center, Size axes,\n                        double angle, double startAngle, double endAngle,\n                        const Scalar& color, int thickness = 1,\n                        int lineType = LINE_8, int shift = 0);\n\n/** @overload\n@param img Image.\n@param box Alternative ellipse representation via RotatedRect. This means that the function draws\nan ellipse inscribed in the rotated rectangle.\n@param color Ellipse color.\n@param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that\na filled ellipse sector is to be drawn.\n@param lineType Type of the ellipse boundary. See the line description.\n*/\nCV_EXPORTS_W void ellipse(InputOutputArray img, const RotatedRect& box, const Scalar& color,\n                        int thickness = 1, int lineType = LINE_8);\n\n/* ----------------------------------------------------------------------------------------- */\n/* ADDING A SET OF PREDEFINED MARKERS WHICH COULD BE USED TO HIGHLIGHT POSITIONS IN AN IMAGE */\n/* ----------------------------------------------------------------------------------------- */\n\n//! 
Possible set of marker types used for the cv::drawMarker function\nenum MarkerTypes\n{\n    MARKER_CROSS = 0,           //!< A crosshair marker shape\n    MARKER_TILTED_CROSS = 1,    //!< A 45 degree tilted crosshair marker shape\n    MARKER_STAR = 2,            //!< A star marker shape, combination of cross and tilted cross\n    MARKER_DIAMOND = 3,         //!< A diamond marker shape\n    MARKER_SQUARE = 4,          //!< A square marker shape\n    MARKER_TRIANGLE_UP = 5,     //!< An upwards pointing triangle marker shape\n    MARKER_TRIANGLE_DOWN = 6    //!< A downwards pointing triangle marker shape\n};\n\n/** @brief Draws a marker on a predefined position in an image.\n\nThe function drawMarker draws a marker on a given position in the image. For the moment several\nmarker types are supported, see cv::MarkerTypes for more information.\n\n@param img Image.\n@param position The point where the crosshair is positioned.\n@param markerType The specific type of marker you want to use, see cv::MarkerTypes\n@param color Line color.\n@param thickness Line thickness.\n@param line_type Type of the line, see cv::LineTypes\n@param markerSize The length of the marker axis [default = 20 pixels]\n */\nCV_EXPORTS_W void drawMarker(CV_IN_OUT Mat& img, Point position, const Scalar& color,\n                             int markerType = MARKER_CROSS, int markerSize=20, int thickness=1,\n                             int line_type=8);\n\n/* ----------------------------------------------------------------------------------------- */\n/* END OF MARKER SECTION */\n/* ----------------------------------------------------------------------------------------- */\n\n/** @overload */\nCV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts,\n                               const Scalar& color, int lineType = LINE_8,\n                               int shift = 0);\n\n/** @brief Fills a convex polygon.\n\nThe function fillConvexPoly draws a filled convex polygon. This function is much faster than the\nfunction cv::fillPoly . It can fill not only convex polygons but any monotonic polygon without\nself-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)\ntwice at the most (though, its top-most and/or the bottom edge could be horizontal).\n\n@param img Image.\n@param points Polygon vertices.\n@param color Polygon color.\n@param lineType Type of the polygon boundaries. See the line description.\n@param shift Number of fractional bits in the vertex coordinates.\n */\nCV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points,\n                                 const Scalar& color, int lineType = LINE_8,\n                                 int shift = 0);\n\n/** @overload */\nCV_EXPORTS void fillPoly(Mat& img, const Point** pts,\n                         const int* npts, int ncontours,\n                         const Scalar& color, int lineType = LINE_8, int shift = 0,\n                         Point offset = Point() );\n\n/** @brief Fills the area bounded by one or more polygons.\n\nThe function fillPoly fills an area bounded by several polygonal contours. The function can fill\ncomplex areas, for example, areas with holes, contours with self-intersections (some of their\nparts), and so forth.\n\n@param img Image.\n@param pts Array of polygons where each polygon is represented as an array of points.\n@param color Polygon color.\n@param lineType Type of the polygon boundaries. 
See the line description.\n@param shift Number of fractional bits in the vertex coordinates.\n@param offset Optional offset of all points of the contours.\n */\nCV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts,\n                           const Scalar& color, int lineType = LINE_8, int shift = 0,\n                           Point offset = Point() );\n\n/** @overload */\nCV_EXPORTS void polylines(Mat& img, const Point* const* pts, const int* npts,\n                          int ncontours, bool isClosed, const Scalar& color,\n                          int thickness = 1, int lineType = LINE_8, int shift = 0 );\n\n/** @brief Draws several polygonal curves.\n\n@param img Image.\n@param pts Array of polygonal curves.\n@param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,\nthe function draws a line from the last vertex of each curve to its first vertex.\n@param color Polyline color.\n@param thickness Thickness of the polyline edges.\n@param lineType Type of the line segments. See the line description.\n@param shift Number of fractional bits in the vertex coordinates.\n\nThe function polylines draws one or more polygonal curves.\n */\nCV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts,\n                            bool isClosed, const Scalar& color,\n                            int thickness = 1, int lineType = LINE_8, int shift = 0 );\n\n/** @example contours2.cpp\n  An example using the drawContours functionality\n*/\n\n/** @example segment_objects.cpp\nAn example using drawContours to clean up a background segmentation result\n */\n\n/** @brief Draws contours outlines or filled contours.\n\nThe function draws contour outlines in the image if \f$\texttt{thickness} \ge 0\f$ or fills the area\nbounded by the contours if \f$\texttt{thickness}<0\f$ . The example below shows how to retrieve\nconnected components from the binary image and label them:\n@code\n    #include \"opencv2/imgproc.hpp\"\n    #include \"opencv2/highgui.hpp\"\n\n    using namespace cv;\n    using namespace std;\n\n    int main( int argc, char** argv )\n    {\n        Mat src;\n        // the first command-line parameter must be a filename of the binary\n        // (black-n-white) image\n        if( argc != 2 || !(src=imread(argv[1], 0)).data)\n            return -1;\n\n        Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);\n\n        src = src > 1;\n        namedWindow( \"Source\", 1 );\n        imshow( \"Source\", src );\n\n        vector<vector<Point> > contours;\n        vector<Vec4i> hierarchy;\n\n        findContours( src, contours, hierarchy,\n            RETR_CCOMP, CHAIN_APPROX_SIMPLE );\n\n        // iterate through all the top-level contours,\n        // draw each connected component with its own random color\n        int idx = 0;\n        for( ; idx >= 0; idx = hierarchy[idx][0] )\n        {\n            Scalar color( rand()&255, rand()&255, rand()&255 );\n            drawContours( dst, contours, idx, color, FILLED, 8, hierarchy );\n        }\n\n        namedWindow( \"Components\", 1 );\n        imshow( \"Components\", dst );\n        waitKey(0);\n    }\n@endcode\n\n@param image Destination image.\n@param contours All the input contours. Each contour is stored as a point vector.\n@param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.\n@param color Color of the contours.\n@param thickness Thickness of lines the contours are drawn with. 
If it is negative (for example,\nthickness=CV_FILLED ), the contour interiors are drawn.\n@param lineType Line connectivity. See cv::LineTypes.\n@param hierarchy Optional information about hierarchy. It is only needed if you want to draw only\nsome of the contours (see maxLevel ).\n@param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.\nIf it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function\ndraws the contours, all the nested contours, all the nested-to-nested contours, and so on. This\nparameter is only taken into account when there is hierarchy available.\n@param offset Optional contour shift parameter. Shift all the drawn contours by the specified\n\\f$\\texttt{offset}=(dx,dy)\\f$ .\n */\nCV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours,\n                              int contourIdx, const Scalar& color,\n                              int thickness = 1, int lineType = LINE_8,\n                              InputArray hierarchy = noArray(),\n                              int maxLevel = INT_MAX, Point offset = Point() );\n\n/** @brief Clips the line against the image rectangle.\n\nThe functions clipLine calculate a part of the line segment that is entirely within the specified\nrectangle. They return false if the line segment is completely outside the rectangle. Otherwise,\nthey return true .\n@param imgSize Image size. The image rectangle is Rect(0, 0, imgSize.width, imgSize.height) .\n@param pt1 First line point.\n@param pt2 Second line point.\n */\nCV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2);\n\n/** @overload\n@param imgRect Image rectangle.\n@param pt1 First line point.\n@param pt2 Second line point.\n*/\nCV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2);\n\n/** @brief Approximates an elliptic arc with a polyline.\n\nThe function ellipse2Poly computes the vertices of a polyline that approximates the specified\nelliptic arc. It is used by cv::ellipse.\n\n@param center Center of the arc.\n@param axes Half of the size of the ellipse main axes. See the ellipse for details.\n@param angle Rotation angle of the ellipse in degrees. See the ellipse for details.\n@param arcStart Starting angle of the elliptic arc in degrees.\n@param arcEnd Ending angle of the elliptic arc in degrees.\n@param delta Angle between the subsequent polyline vertices. It defines the approximation\naccuracy.\n@param pts Output vector of polyline vertices.\n */\nCV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle,\n                                int arcStart, int arcEnd, int delta,\n                                CV_OUT std::vector<Point>& pts );\n\n/** @brief Draws a text string.\n\nThe function putText renders the specified text string in the image. Symbols that cannot be rendered\nusing the specified font are replaced by question marks. See getTextSize for a text rendering code\nexample.\n\n@param img Image.\n@param text Text string to be drawn.\n@param org Bottom-left corner of the text string in the image.\n@param fontFace Font type, see cv::HersheyFonts.\n@param fontScale Font scale factor that is multiplied by the font-specific base size.\n@param color Text color.\n@param thickness Thickness of the lines used to draw a text.\n@param lineType Line type. See the line for details.\n@param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. 
Otherwise,\nit is at the top-left corner.\n */\nCV_EXPORTS_W void putText( InputOutputArray img, const String& text, Point org,\n                         int fontFace, double fontScale, Scalar color,\n                         int thickness = 1, int lineType = LINE_8,\n                         bool bottomLeftOrigin = false );\n\n/** @brief Calculates the width and height of a text string.\n\nThe function getTextSize calculates and returns the size of a box that contains the specified text.\nThat is, the following code renders some text, the tight box surrounding it, and the baseline:\n@code\n    String text = \"Funny text inside the box\";\n    int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;\n    double fontScale = 2;\n    int thickness = 3;\n\n    Mat img(600, 800, CV_8UC3, Scalar::all(0));\n\n    int baseline=0;\n    Size textSize = getTextSize(text, fontFace,\n                                fontScale, thickness, &baseline);\n    baseline += thickness;\n\n    // center the text\n    Point textOrg((img.cols - textSize.width)/2,\n                  (img.rows + textSize.height)/2);\n\n    // draw the box\n    rectangle(img, textOrg + Point(0, baseline),\n              textOrg + Point(textSize.width, -textSize.height),\n              Scalar(0,0,255));\n    // ... and the baseline first\n    line(img, textOrg + Point(0, thickness),\n         textOrg + Point(textSize.width, thickness),\n         Scalar(0, 0, 255));\n\n    // then put the text itself\n    putText(img, text, textOrg, fontFace, fontScale,\n            Scalar::all(255), thickness, 8);\n@endcode\n\n@param text Input text string.\n@param fontFace Font to use, see cv::HersheyFonts.\n@param fontScale Font scale factor that is multiplied by the font-specific base size.\n@param thickness Thickness of lines used to render the text. See putText for details.\n@param[out] baseLine y-coordinate of the baseline relative to the bottom-most text\npoint.\n@return The size of a box that contains the specified text.\n\n@see cv::putText\n */\nCV_EXPORTS_W Size getTextSize(const String& text, int fontFace,\n                            double fontScale, int thickness,\n                            CV_OUT int* baseLine);\n\n/** @brief Line iterator\n\nThe class is used to iterate over all the pixels on the raster line\nsegment connecting two specified points.\n\nThe class LineIterator is used to get each pixel of a raster line. 
It\ncan be treated as a versatile implementation of the Bresenham algorithm\nwhere you can stop at each pixel and do some extra processing, for\nexample, grab pixel values along the line or draw a line with an effect\n(for example, with XOR operation).\n\nThe number of pixels along the line is stored in LineIterator::count.\nThe method LineIterator::pos returns the current position in the image:\n\n@code{.cpp}\n// grabs pixels along the line (pt1, pt2)\n// from 8-bit 3-channel image to the buffer\nLineIterator it(img, pt1, pt2, 8);\nLineIterator it2 = it;\nvector<Vec3b> buf(it.count);\n\nfor(int i = 0; i < it.count; i++, ++it)\n    buf[i] = *(const Vec3b*)*it;\n\n// alternative way of iterating through the line\nfor(int i = 0; i < it2.count; i++, ++it2)\n{\n    Vec3b val = img.at<Vec3b>(it2.pos());\n    CV_Assert(buf[i] == val);\n}\n@endcode\n*/\nclass CV_EXPORTS LineIterator\n{\npublic:\n    /** @brief initializes the iterator\n\n    creates iterators for the line connecting pt1 and pt2\n    the line will be clipped on the image boundaries\n    the line is 8-connected or 4-connected\n    If leftToRight=true, then the iteration is always done\n    from the left-most point to the right-most,\n    regardless of the ordering of the pt1 and pt2 parameters\n    */\n    LineIterator( const Mat& img, Point pt1, Point pt2,\n                  int connectivity = 8, bool leftToRight = false );\n    /** @brief returns pointer to the current pixel\n    */\n    uchar* operator *();\n    /** @brief prefix increment operator (++it). shifts iterator to the next pixel\n    */\n    LineIterator& operator ++();\n    /** @brief postfix increment operator (it++). shifts iterator to the next pixel\n    */\n    LineIterator operator ++(int);\n    /** @brief returns coordinates of the current pixel\n    */\n    Point pos() const;\n\n    uchar* ptr;\n    const uchar* ptr0;\n    int step, elemSize;\n    int err, count;\n    int minusDelta, plusDelta;\n    int minusStep, plusStep;\n};\n\n//! @cond IGNORED\n\n// === LineIterator implementation ===\n\ninline\nuchar* LineIterator::operator *()\n{\n    return ptr;\n}\n\ninline\nLineIterator& LineIterator::operator ++()\n{\n    int mask = err < 0 ? -1 : 0;\n    err += minusDelta + (plusDelta & mask);\n    ptr += minusStep + (plusStep & mask);\n    return *this;\n}\n\ninline\nLineIterator LineIterator::operator ++(int)\n{\n    LineIterator it = *this;\n    ++(*this);\n    return it;\n}\n\ninline\nPoint LineIterator::pos() const\n{\n    Point p;\n    p.y = (int)((ptr - ptr0)/step);\n    p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize);\n    return p;\n}\n\n//! @endcond\n\n//! @} imgproc_draw\n\n//! @} imgproc\n\n} // cv\n\n#ifndef DISABLE_OPENCV_24_COMPATIBILITY\n#include \"opencv2/imgproc/imgproc_c.h\"\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/line_descriptor/descriptor.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2014, Biagio Montesano, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_DESCRIPTOR_HPP__\n#define __OPENCV_DESCRIPTOR_HPP__\n\n#include <map>\n#include <vector>\n#include <list>\n\n#if defined _MSC_VER && _MSC_VER <= 1700\n#include <stdint.h>\n#else\n#include <inttypes.h>\n#endif\n\n#include <stdio.h>\n#include <iostream>\n\n#include \"opencv2/core/utility.hpp\"\n//#include \"opencv2/core/private.hpp\"\n#include <opencv2/imgproc.hpp>\n#include <opencv2/features2d.hpp>\n#include <opencv2/highgui.hpp>\n#include \"opencv2/core.hpp\"\n\n/* define data types */\ntypedef uint64_t UINT64;\ntypedef uint32_t UINT32;\ntypedef uint16_t UINT16;\ntypedef uint8_t UINT8;\n\n/* define constants */\n#define UINT64_1 ((UINT64)0x01)\n#define UINT32_1 ((UINT32)0x01)\n\nnamespace cv\n{\nnamespace line_descriptor\n{\n\n//! @addtogroup line_descriptor\n//! @{\n\n/** @brief A class to represent a line\n\nAs aformentioned, it is been necessary to design a class that fully stores the information needed to\ncharacterize completely a line and plot it on image it was extracted from, when required.\n\n*KeyLine* class has been created for such goal; it is mainly inspired to Feature2d's KeyPoint class,\nsince KeyLine shares some of *KeyPoint*'s fields, even if a part of them assumes a different\nmeaning, when speaking about lines. 
In particular:\n\n-   the *class_id* field is used to gather lines extracted from different octaves which refer to the\n    same line inside the original image (such lines and the one they represent in the original image share\n    the same *class_id* value)\n-   the *angle* field represents the line's slope with respect to the (positive) X axis\n-   the *pt* field represents the line's midpoint\n-   the *response* field is computed as the ratio between the line's length and the maximum of the\n    image's width and height\n-   the *size* field is the area of the smallest rectangle containing the line\n\nApart from the fields inspired by the KeyPoint class, KeyLine stores information about the extremes of the line in the\noriginal image and in the octave it was extracted from, the line's length and the number of pixels it\ncovers.\n */\nstruct CV_EXPORTS KeyLine\n{\n public:\n  /** orientation of the line */\n  float angle;\n\n  /** object ID that can be used to cluster keylines by the line they represent */\n  int class_id;\n\n  /** octave (pyramid layer), from which the keyline has been extracted */\n  int octave;\n\n  /** coordinates of the midpoint */\n  Point2f pt;\n\n  /** the response, by which the strongest keylines have been selected.\n   It's represented by the ratio between the line's length and the maximum of the\n   image's width and height */\n  float response;\n\n  /** minimum area containing line */\n  float size;\n\n  /** line's extremes in the original image */\n  float startPointX;\n  float startPointY;\n  float endPointX;\n  float endPointY;\n\n  /** line's extremes in image it was extracted from */\n  float sPointInOctaveX;\n  float sPointInOctaveY;\n  float ePointInOctaveX;\n  float ePointInOctaveY;\n\n  /** the length of line */\n  float lineLength;\n\n  /** number of pixels covered by the line */\n  int numOfPixels;\n\n  /** Returns the start point of the line in the original image */\n  Point2f getStartPoint() const\n  {\n    return Point2f(startPointX, startPointY);\n  }\n\n  /** Returns the end point of the line in the original image */\n  Point2f getEndPoint() const\n  {\n    return Point2f(endPointX, endPointY);\n  }\n\n  /** Returns the start point of the line in the octave it was extracted from */\n  Point2f getStartPointInOctave() const\n  {\n    return Point2f(sPointInOctaveX, sPointInOctaveY);\n  }\n\n  /** Returns the end point of the line in the octave it was extracted from */\n  Point2f getEndPointInOctave() const\n  {\n    return Point2f(ePointInOctaveX, ePointInOctaveY);\n  }\n\n  /** constructor */\n  KeyLine()\n  {\n  }\n};\n\n/** @brief Class implements both functionalities for detection of lines and computation of their\nbinary descriptor.\n\nClass' interface is mainly based on the ones of classical detectors and extractors, such as\nFeature2d's @ref features2d_main and @ref features2d_match. 
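A minimal detection/description sketch (the input file name is hypothetical; the image is loaded\nas grayscale):\n@code\n    cv::Mat image = cv::imread(\"lines.jpg\", 0);\n    cv::Ptr<cv::line_descriptor::BinaryDescriptor> bd =\n        cv::line_descriptor::BinaryDescriptor::createBinaryDescriptor();\n    std::vector<cv::line_descriptor::KeyLine> keylines;\n    cv::Mat descriptors;\n    bd->detect(image, keylines);\n    bd->compute(image, keylines, descriptors);\n@endcode\n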
Retrieved information about lines is\nstored in line_descriptor::KeyLine objects.\n */\nclass CV_EXPORTS BinaryDescriptor : public Algorithm\n{\n\n public:\n  /** @brief List of BinaryDescriptor parameters:\n  */\n  struct CV_EXPORTS Params\n  {\n    /*CV_WRAP*/\n    Params();\n\n    /** the number of image octaves (default = 1) */\n\n    int numOfOctave_;\n\n    /** the width of band (default: 7) */\n\n    int widthOfBand_;\n\n    /** image's reduction ratio in construction of Gaussian pyramids */\n    int reductionRatio;\n\n    int ksize_;\n\n    /** read parameters from a FileNode object and store them (struct function) */\n    void read( const FileNode& fn );\n\n    /** store parameters to a FileStorage object (struct function) */\n    void write( FileStorage& fs ) const;\n\n  };\n\n  /** @brief Constructor\n\n  @param parameters configuration parameters BinaryDescriptor::Params\n\n  If no argument is provided, the constructor sets default values (see comments in the code snippet in\n  the previous section). Default values are strongly recommended.\n  */\n  BinaryDescriptor( const BinaryDescriptor::Params &parameters = BinaryDescriptor::Params() );\n\n  /** @brief Create a BinaryDescriptor object with default parameters (or with the ones provided)\n  and return a smart pointer to it\n     */\n  static Ptr<BinaryDescriptor> createBinaryDescriptor();\n  static Ptr<BinaryDescriptor> createBinaryDescriptor( Params parameters );\n\n  /** destructor */\n  ~BinaryDescriptor();\n\n  /** @brief Get current number of octaves\n  */\n  int getNumOfOctaves();/*CV_WRAP*/\n  /** @brief Set number of octaves\n    @param octaves number of octaves\n     */\n  void setNumOfOctaves( int octaves );/*CV_WRAP*/\n  /** @brief Get current width of bands\n    */\n  int getWidthOfBand();/*CV_WRAP*/\n  /** @brief Set width of bands\n    @param width width of bands\n    */\n  void setWidthOfBand( int width );/*CV_WRAP*/\n  /** @brief Get current reduction ratio (used in Gaussian pyramids)\n    */\n  int getReductionRatio();/*CV_WRAP*/\n  /** @brief Set reduction ratio (used in Gaussian pyramids)\n    @param rRatio reduction ratio\n     */\n  void setReductionRatio( int rRatio );\n\n  /** @brief Read parameters from a FileNode object and store them\n\n    @param fn source FileNode file\n     */\n  virtual void read( const cv::FileNode& fn );\n\n  /** @brief Store parameters to a FileStorage object\n\n    @param fs output FileStorage file\n     */\n  virtual void write( cv::FileStorage& fs ) const;\n\n  /** @brief Performs line detection\n\n    @param image input image\n    @param keypoints vector that will store extracted lines for one or more images\n    @param mask mask matrix to detect only KeyLines of interest\n     */\n  void detect( const Mat& image, CV_OUT std::vector<KeyLine>& keypoints, const Mat& mask = Mat() );\n\n  /** @overload\n\n    @param images input images\n    @param keylines set of vectors that will store extracted lines for one or more images\n    @param masks vector of mask matrices to detect only KeyLines of interest from each input image\n     */\n  void detect( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, const std::vector<Mat>& masks =\n                   std::vector<Mat>() ) const;\n\n  /** @brief Performs descriptor computation\n\n    @param image input image\n    @param keylines vector containing lines for which descriptors must be computed\n    @param descriptors\n    @param returnFloatDescr flag (when set to true, original non-binary descriptors are 
returned)\n     */\n  void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyLine>& keylines, CV_OUT Mat& descriptors, bool returnFloatDescr = false ) const;\n\n  /** @overload\n\n    @param images input images\n    @param keylines set of vectors containing lines for which descriptors must be computed\n    @param descriptors\n    @param returnFloatDescr flag (when set to true, original non-binary descriptors are returned)\n     */\n  void compute( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, std::vector<Mat>& descriptors, bool returnFloatDescr =\n                    false ) const;\n\n  /** @brief Return descriptor size\n   */\n  int descriptorSize() const;\n\n  /** @brief Return data type\n   */\n  int descriptorType() const;\n\n  /** returns norm mode */\n  /*CV_WRAP*/\n  int defaultNorm() const;\n\n  /** @brief Define operator '()' to perform detection of KeyLines and computation of descriptors in a row.\n\n    @param image input image\n    @param mask mask matrix to select which lines in KeyLines must be accepted among the ones\n    extracted (used when *keylines* is not empty)\n    @param keylines vector that contains input lines (when filled, the detection part will be skipped\n    and input lines will be passed as input to the algorithm computing descriptors)\n    @param descriptors matrix that will store final descriptors\n    @param useProvidedKeyLines flag (when set to true, detection phase will be skipped and only\n    computation of descriptors will be executed, using lines provided in *keylines*)\n    @param returnFloatDescr flag (when set to true, original non-binary descriptors are returned)\n     */\n  virtual void operator()( InputArray image, InputArray mask, CV_OUT std::vector<KeyLine>& keylines, OutputArray descriptors,\n                           bool useProvidedKeyLines = false, bool returnFloatDescr = false ) const;\n\n protected:\n  /** implementation of line detection */\n  virtual void detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, const Mat& mask = Mat() ) const;\n\n  /** implementation of descriptors' computation */\n  virtual void computeImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, Mat& descriptors, bool returnFloatDescr,\n                            bool useDetectionData ) const;\n\n private:\n  /** struct to represent lines extracted from an octave */\n  struct OctaveLine\n  {\n    unsigned int octaveCount;  //the octave which this line is detected\n    unsigned int lineIDInOctave;  //the line ID in that octave image\n    unsigned int lineIDInScaleLineVec;  //the line ID in Scale line vector\n    float lineLength;  //the length of line in original image scale\n  };\n\n  // A 2D line (normal equation parameters).\n  struct SingleLine\n  {\n    //note: rho and theta are based on coordinate origin, i.e. 
the top-left corner of image\n    double rho;  //unit: pixel length\n    double theta;  //unit: rad\n    double linePointX;  // = rho * cos(theta);\n    double linePointY;  // = rho * sin(theta);\n    //for EndPoints, the coordinate origin is the top-left corner of image.\n    double startPointX;\n    double startPointY;\n    double endPointX;\n    double endPointY;\n    //direction of a line, the angle between positive line direction (dark side is on the left) and positive X axis.\n    double direction;\n    //mean gradient magnitude\n    double gradientMagnitude;\n    //mean gray value of pixels in dark side of line\n    double darkSideGrayValue;\n    //mean gray value of pixels in light side of line\n    double lightSideGrayValue;\n    //the length of line\n    double lineLength;\n    //the width of line;\n    double width;\n    //number of pixels\n    int numOfPixels;\n    //the descriptor of line\n    std::vector<double> descriptor;\n  };\n\n  // Specifies a vector of lines.\n  typedef std::vector<SingleLine> Lines_list;\n\n  struct OctaveSingleLine\n  {\n    /*endPoints, the coordinate origin is the top-left corner of the original image.\n     *startPointX = sPointInOctaveX * (factor)^octaveCount; */\n    float startPointX;\n    float startPointY;\n    float endPointX;\n    float endPointY;\n    //endPoints, the coordinate origin is the top-left corner of the octave image.\n    float sPointInOctaveX;\n    float sPointInOctaveY;\n    float ePointInOctaveX;\n    float ePointInOctaveY;\n    //direction of a line, the angle between positive line direction (dark side is on the left) and positive X axis.\n    float direction;\n    //the summation of gradient magnitudes of pixels on lines\n    float salience;\n    //the length of line\n    float lineLength;\n    //number of pixels\n    unsigned int numOfPixels;\n    //the octave in which this line is detected\n    unsigned int octaveCount;\n    //the descriptor of line\n    std::vector<float> descriptor;\n  };\n\n  struct Pixel\n  {\n    unsigned int x;  //X coordinate\n    unsigned int y;  //Y coordinate\n  };\n  struct EdgeChains\n  {\n    std::vector<unsigned int> xCors;  //all the x coordinates of edge points\n    std::vector<unsigned int> yCors;  //all the y coordinates of edge points\n    std::vector<unsigned int> sId;  //the start index of each edge in the coordinate arrays\n    unsigned int numOfEdges;  //the number of edges whose length are larger than minLineLen; numOfEdges < sId.size;\n  };\n\n  struct LineChains\n  {\n    std::vector<unsigned int> xCors;  //all the x coordinates of line points\n    std::vector<unsigned int> yCors;  //all the y coordinates of line points\n    std::vector<unsigned int> sId;  //the start index of each line in the coordinate arrays\n    unsigned int numOfLines;  //the number of lines whose length are larger than minLineLen; numOfLines < sId.size;\n  };\n\n  typedef std::list<Pixel> PixelChain;  //each edge is a pixel chain\n\n  struct EDLineParam\n  {\n    int ksize;\n    float sigma;\n    float gradientThreshold;\n    float anchorThreshold;\n    int scanIntervals;\n    int minLineLen;\n    double lineFitErrThreshold;\n  };\n\n  #define RELATIVE_ERROR_FACTOR   100.0\n  #define MLN10   2.30258509299404568402\n  #define log_gamma(x)    ((x)>15.0?log_gamma_windschitl(x):log_gamma_lanczos(x))\n\n  /** This class is used to detect lines from input image.\n   * First, edges are extracted from input image following the method presented in Cihan Topal and\n   * Cuneyt Akinlar's paper:\"Edge Drawing: A Heuristic 
Approach to Robust Real-Time Edge Detection\", 2010.\n   * Then, lines are extracted from the edge image following the method presented in Cuneyt Akinlar and\n   * Cihan Topal's paper:\"EDLines: A real-time line segment detector with a false detection control\", 2011\n   * PS: The linking step of edge detection differs slightly from the Edge Drawing algorithm\n   *     described in the paper. The edge chain doesn't stop when the pixel direction is changed.\n   */\n  class EDLineDetector\n  {\n   public:\n    EDLineDetector();\n    EDLineDetector( EDLineParam param );\n    ~EDLineDetector();\n\n    /*extract edges from image\n     *image:    In, gray image;\n     *edges:    Out, store the edges, each edge is a pixel chain\n     *return -1: error happens\n     */\n    int EdgeDrawing( cv::Mat &image, EdgeChains &edgeChains );\n\n    /*extract lines from image\n     *image:    In, gray image;\n     *lines:    Out, store the extracted lines,\n     *return -1: error happens\n     */\n    int EDline( cv::Mat &image, LineChains &lines );\n\n    /** extract lines from image, and store them */\n    int EDline( cv::Mat &image );\n\n    cv::Mat dxImg_;  //store the dxImg;\n\n    cv::Mat dyImg_;  //store the dyImg;\n\n    cv::Mat gImgWO_;  //store the gradient image without threshold;\n\n    LineChains lines_;  //store the detected line chains;\n\n    //store the line Equation coefficients, vec3=[w1,w2,w3] for line w1*x + w2*y + w3=0;\n    std::vector<std::vector<double> > lineEquations_;\n\n    //store the line endpoints, [x1,y1,x2,y2]\n    std::vector<std::vector<float> > lineEndpoints_;\n\n    //store the line direction\n    std::vector<float> lineDirection_;\n\n    //store the line salience, which is the summation of gradients of pixels on line\n    std::vector<float> lineSalience_;\n\n    // image sizes\n    unsigned int imageWidth;\n    unsigned int imageHeight;\n\n    /*The threshold of line fit error;\n     *If lineFitErr is larger than this threshold, then\n     *the pixel chain is not accepted as a single line segment.*/\n    double lineFitErrThreshold_;\n\n    /*the threshold of pixel gradient magnitude.\n     *Only those pixels whose gradient magnitude is larger than this threshold will be\n     *taken as possible edge points. Default value is 36*/\n    short gradienThreshold_;\n\n    /*If the pixel's gradient value is bigger than both of its neighbors by a\n     *certain threshold (ANCHOR_THRESHOLD), the pixel is marked to be an anchor.\n     *Default value is 8*/\n    unsigned char anchorThreshold_;\n\n    /*anchor testing can be performed at different scan intervals, i.e.,\n     *every row/column, every second row/column etc.\n     *Default value is 2*/\n    unsigned int scanIntervals_;\n\n    int minLineLen_;  //minimal acceptable line length\n\n   private:\n    void InitEDLine_();\n\n    /*For an input edge chain, find the best fit line, the default chain length is minLineLen_\n     *xCors:  In, pointer to the X coordinates of pixel chain;\n     *yCors:  In, pointer to the Y coordinates of pixel chain;\n     *offsetS:In, start index of this chain in vector;\n     *lineEquation: Out, [a,b] which are the coefficients of lines y=ax+b(horizontal) or x=ay+b(vertical);\n     *return:  line fit error; -1:error happens;\n     */\n    double LeastSquaresLineFit_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS, std::vector<double> &lineEquation );\n\n    /*For an input pixel chain, find the best fit line. 
Only do the update based on new points.\n     *For A*x=v,  Least square estimation of x = Inv(A^T * A) * (A^T * v);\n     *If some new observations are added, i.e., [A; A'] * x = [v; v'],\n     *then x' = Inv(A^T * A + (A')^T * A') * (A^T * v + (A')^T * v');\n     *xCors:  In, pointer to the X coordinates of pixel chain;\n     *yCors:  In, pointer to the Y coordinates of pixel chain;\n     *offsetS:In, start index of this chain in vector;\n     *newOffsetS: In, start index of extended part;\n     *offsetE:In, end index of this chain in vector;\n     *lineEquation: Out, [a,b] which are the coefficients of lines y=ax+b(horizontal) or x=ay+b(vertical);\n     *return:  line fit error; -1:error happens;\n     */\n    double LeastSquaresLineFit_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS, unsigned int newOffsetS, unsigned int offsetE,\n                                 std::vector<double> &lineEquation );\n\n    /** Validate line based on the Helmholtz principle, which basically states that\n     * for a structure to be perceptually meaningful, the expectation of this structure\n     * by chance must be very low.\n     */\n    bool LineValidation_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS, unsigned int offsetE, std::vector<double> &lineEquation,\n                          float &direction );\n\n    bool bValidate_;  //flag to decide whether line will be validated\n\n    int ksize_;  //the size of Gaussian kernel: ksize X ksize, default value is 5.\n\n    float sigma_;  //the sigma of Gaussian kernel, default value is 1.0.\n\n    /*For example, there are two edges in the image:\n     *edge1 = [(7,4), (8,5), (9,6),| (10,7)|, (11, 8), (12,9)] and\n     *edge2 = [(14,9), (15,10), (16,11), (17,12),| (18, 13)|, (19,14)] ; then we store them as following:\n     *pFirstPartEdgeX_ = [10, 11, 12, 18, 19];//store the first part of each edge[from middle to end]\n     *pFirstPartEdgeY_ = [7,  8,  9,  13, 14];\n     *pFirstPartEdgeS_ = [0,3,5];// the index of start point of first part of each edge\n     *pSecondPartEdgeX_ = [10, 9, 8, 7, 18, 17, 16, 15, 14];//store the second part of each edge[from middle to front]\n     *pSecondPartEdgeY_ = [7,  6, 5, 4, 13, 12, 11, 10, 9];//anchor points(10, 7) and (18, 13) are stored again\n     *pSecondPartEdgeS_ = [0, 4, 9];// the index of start point of second part of each edge\n     *This type of storage order is because of the order of edge detection process.\n     *For each edge, start from one anchor point, first go right, then go left or first go down, then go up*/\n\n    //store the X coordinates of the first part of the pixels for chains\n    unsigned int *pFirstPartEdgeX_;\n\n    //store the Y coordinates of the first part of the pixels for chains\n    unsigned int *pFirstPartEdgeY_;\n\n    //store the start index of every edge chain in the first part arrays\n    unsigned int *pFirstPartEdgeS_;\n\n    //store the X coordinates of the second part of the pixels for chains\n    unsigned int *pSecondPartEdgeX_;\n\n    //store the Y coordinates of the second part of the pixels for chains\n    unsigned int *pSecondPartEdgeY_;\n\n    //store the start index of every edge chain in the second part arrays\n    unsigned int *pSecondPartEdgeS_;\n\n    //store the X coordinates of anchors\n    unsigned int *pAnchorX_;\n\n    //store the Y coordinates of anchors\n    unsigned int *pAnchorY_;\n\n    //edges\n    cv::Mat edgeImage_;\n\n    cv::Mat gImg_;  //store the gradient image;\n\n    cv::Mat dirImg_;  //store the direction image\n\n    
double logNT_;\n\n    cv::Mat_<float> ATA;   //the previous matrix of A^T * A;\n\n    cv::Mat_<float> ATV;    //the previous vector of A^T * V;\n\n    cv::Mat_<float> fitMatT;   //the matrix used in line fit function;\n\n    cv::Mat_<float> fitVec;    //the vector used in line fit function;\n\n    cv::Mat_<float> tempMatLineFit;  //the matrix used in line fit function;\n\n    cv::Mat_<float> tempVecLineFit;    //the vector used in line fit function;\n\n    /** Compare doubles by relative error.\n     The resulting rounding error after floating point computations\n     depend on the specific operations done. The same number computed by\n     different algorithms could present different rounding errors. For a\n     useful comparison, an estimation of the relative rounding error\n     should be considered and compared to a factor times EPS. The factor\n     should be related to the accumulated rounding error in the chain of\n     computation. Here, as a simplification, a fixed factor is used.\n     */\n    static int double_equal( double a, double b )\n    {\n      double abs_diff, aa, bb, abs_max;\n      /* trivial case */\n      if( a == b )\n        return true;\n      abs_diff = fabs( a - b );\n      aa = fabs( a );\n      bb = fabs( b );\n      abs_max = aa > bb ? aa : bb;\n\n      /* DBL_MIN is the smallest normalized number, thus, the smallest\n       number whose relative error is bounded by DBL_EPSILON. For\n       smaller numbers, the same quantization steps as for DBL_MIN\n       are used. Then, for smaller numbers, a meaningful \"relative\"\n       error should be computed by dividing the difference by DBL_MIN. */\n      if( abs_max < DBL_MIN )\n        abs_max = DBL_MIN;\n\n      /* equal if relative error <= factor x eps */\n      return ( abs_diff / abs_max ) <= ( RELATIVE_ERROR_FACTOR * DBL_EPSILON );\n    }\n\n    /** Computes the natural logarithm of the absolute value of\n     the gamma function of x using the Lanczos approximation.\n     See http://www.rskey.org/gamma.htm\n     The formula used is\n     @f[\n     \\Gamma(x) = \\frac{ \\sum_{n=0}^{N} q_n x^n }{ \\Pi_{n=0}^{N} (x+n) }\n     (x+5.5)^{x+0.5} e^{-(x+5.5)}\n     @f]\n     so\n     @f[\n     \\log\\Gamma(x) = \\log\\left( \\sum_{n=0}^{N} q_n x^n \\right)\n     + (x+0.5) \\log(x+5.5) - (x+5.5) - \\sum_{n=0}^{N} \\log(x+n)\n     @f]\n     and\n     q0 = 75122.6331530,\n     q1 = 80916.6278952,\n     q2 = 36308.2951477,\n     q3 = 8687.24529705,\n     q4 = 1168.92649479,\n     q5 = 83.8676043424,\n     q6 = 2.50662827511.\n     */\n    static double log_gamma_lanczos( double x )\n    {\n      static double q[7] =\n      { 75122.6331530, 80916.6278952, 36308.2951477, 8687.24529705, 1168.92649479, 83.8676043424, 2.50662827511 };\n      double a = ( x + 0.5 ) * log( x + 5.5 ) - ( x + 5.5 );\n      double b = 0.0;\n      int n;\n      for ( n = 0; n < 7; n++ )\n      {\n        a -= log( x + (double) n );\n        b += q[n] * pow( x, (double) n );\n      }\n      return a + log( b );\n    }\n\n    /** Computes the natural logarithm of the absolute value of\n     the gamma function of x using Windschitl method.\n     See http://www.rskey.org/gamma.htm\n     The formula used is\n     @f[\n     \\Gamma(x) = \\sqrt{\\frac{2\\pi}{x}} \\left( \\frac{x}{e}\n     \\sqrt{ x\\sinh(1/x) + \\frac{1}{810x^6} } \\right)^x\n     @f]\n     so\n     @f[\n     \\log\\Gamma(x) = 0.5\\log(2\\pi) + (x-0.5)\\log(x) - x\n     + 0.5x\\log\\left( x\\sinh(1/x) + \\frac{1}{810x^6} \\right).\n     @f]\n     This formula is a good approximation 
when x > 15.\n     */\n    static double log_gamma_windschitl( double x )\n    {\n      return 0.918938533204673 + ( x - 0.5 ) * log( x ) - x + 0.5 * x * log( x * sinh( 1 / x ) + 1 / ( 810.0 * pow( x, 6.0 ) ) );\n    }\n\n    /** Computes -log10(NFA).\n     NFA stands for Number of False Alarms:\n     @f[\n     \mathrm{NFA} = NT \cdot B(n,k,p)\n     @f]\n     - NT       - number of tests\n     - B(n,k,p) - tail of binomial distribution with parameters n,k and p:\n     @f[\n     B(n,k,p) = \sum_{j=k}^n\n     \left(\begin{array}{c}n\\j\end{array}\right)\n     p^{j} (1-p)^{n-j}\n     @f]\n     The value -log10(NFA) is equivalent but more intuitive than NFA:\n     - -1 corresponds to 10 mean false alarms\n     -  0 corresponds to 1 mean false alarm\n     -  1 corresponds to 0.1 mean false alarms\n     -  2 corresponds to 0.01 mean false alarms\n     -  ...\n     Used this way, the bigger the value, the better the detection,\n     and a logarithmic scale is used.\n     @param n,k,p binomial parameters.\n     @param logNT logarithm of Number of Tests\n     The computation is based on the gamma function by the following\n     relation:\n     @f[\n     \left(\begin{array}{c}n\\k\end{array}\right)\n     = \frac{ \Gamma(n+1) }{ \Gamma(k+1) \cdot \Gamma(n-k+1) }.\n     @f]\n     We use efficient algorithms to compute the logarithm of\n     the gamma function.\n     To make the computation faster, not all the sum is computed, part\n     of the terms are neglected based on a bound to the error obtained\n     (an error of 10% in the result is accepted).\n     */\n    static double nfa( int n, int k, double p, double logNT )\n    {\n      double tolerance = 0.1; /* an error of 10% in the result is accepted */\n      double log1term, term, bin_term, mult_term, bin_tail, err, p_term;\n      int i;\n\n      /* check parameters */\n      if( n < 0 || k < 0 || k > n || p <= 0.0 || p >= 1.0 )\n      {\n        std::cout << \"nfa: wrong n, k or p values.\" << std::endl;\n        exit( 0 );\n      }\n      /* trivial cases */\n      if( n == 0 || k == 0 )\n        return -logNT;\n      if( n == k )\n        return -logNT - (double) n * log10( p );\n\n      /* probability term */\n      p_term = p / ( 1.0 - p );\n\n      /* compute the first term of the series */\n      /*\n       binomial_tail(n,k,p) = sum_{i=k}^n bincoef(n,i) * p^i * (1-p)^{n-i}\n       where bincoef(n,i) are the binomial coefficients.\n       But\n       bincoef(n,k) = gamma(n+1) / ( gamma(k+1) * gamma(n-k+1) ).\n       We use this to compute the first term. Actually the log of it.\n       */\n      log1term = log_gamma( (double) n + 1.0 ) - log_gamma( (double) k + 1.0 ) - log_gamma( (double) ( n - k ) + 1.0 )\n                 + (double) k * log( p ) + (double) ( n - k ) * log( 1.0 - p );\n      term = exp( log1term );\n\n      /* in some cases no more computations are needed */\n      if( double_equal( term, 0.0 ) )\n      { /* the first term is almost zero */\n        if( (double) k > (double) n * p ) /* at begin or end of the tail? */\n          return -log1term / MLN10 - logNT; /* end: use just the first term */\n        else\n          return -logNT; /* begin: the tail is roughly 1 */\n      }\n\n      /* compute more terms if needed */\n      bin_tail = term;\n      for ( i = k + 1; i <= n; i++ )\n      {\n        /*    As\n         term_i = bincoef(n,i) * p^i * (1-p)^(n-i)\n         and\n         bincoef(n,i)/bincoef(n,i-1) = n-i+1 / i,\n         then,\n         term_i / term_i-1 = (n-i+1)/i * p/(1-p)\n         and\n         term_i = term_i-1 * (n-i+1)/i * p/(1-p).\n         p/(1-p) is computed only once and stored in 'p_term'.\n         */\n        bin_term = (double) ( n - i + 1 ) / (double) i;\n        mult_term = bin_term * p_term;\n        term *= mult_term;\n        bin_tail += term;\n        if( bin_term < 1.0 )\n        {\n          /* When bin_term<1 then mult_term_j<mult_term_i for j>i.\n           Then, the error on the binomial tail when truncated at\n           the i term can be bounded by a geometric series of form\n           term_i * sum mult_term_i^j.                            */\n          err = term * ( ( 1.0 - pow( mult_term, (double) ( n - i + 1 ) ) ) / ( 1.0 - mult_term ) - 1.0 );\n          /* One wants an error at most of tolerance*final_result, or:\n           tolerance * abs(-log10(bin_tail)-logNT).\n           Now, the error that can be accepted on bin_tail is\n           given by tolerance*final_result divided by the derivative\n           of -log10(x) when x=bin_tail. that is:\n           tolerance * abs(-log10(bin_tail)-logNT) / (1/bin_tail)\n           Finally, we truncate the tail if the error is less than:\n           tolerance * abs(-log10(bin_tail)-logNT) * bin_tail        */\n          if( err < tolerance * fabs( -log10( bin_tail ) - logNT ) * bin_tail )\n            break;\n        }\n      }\n      return -log10( bin_tail ) - logNT;\n    }\n  };\n\n  // Specifies a vector of lines.\n  typedef std::vector<OctaveSingleLine> LinesVec;\n\n  // each element in ScaleLines is a vector of lines\n  // which corresponds to the same line detected in different octave images.\n  typedef std::vector<LinesVec> ScaleLines;\n\n  /* compute Gaussian pyramids */\n  void computeGaussianPyramid( const Mat& image, const int numOctaves );\n\n  /* compute Sobel's derivatives */\n  void computeSobel( const Mat& image, const int numOctaves );\n\n  /* conversion of an LBD descriptor to its binary representation */\n  unsigned char binaryConversion( float* f1, float* f2 );\n\n  /* compute LBD descriptors using EDLine extractor */\n  int computeLBD( ScaleLines &keyLines, bool useDetectionData = false );\n\n  /* gathers lines in groups using EDLine extractor.\n   Each group contains the same line, detected in different octaves */\n  int OctaveKeyLines( cv::Mat& image, ScaleLines &keyLines );\n\n  /* the local gaussian coefficient applied to the orthogonal line direction within each band */\n  std::vector<double> gaussCoefL_;\n\n  /* the global gaussian coefficient applied to each row within line support region */\n  std::vector<double> gaussCoefG_;\n\n  /* descriptor parameters */\n  Params params;\n\n  /* vector of sizes of downsampled and blurred images */\n  std::vector<cv::Size> images_sizes;\n\n  /*For each octave of image, we define an EDLineDetector, because we can get gradient images (dxImg, dyImg, gImg)\n   *from the EDLineDetector class without extra computation cost. 
Another reason is that, if we use\n *a single EDLineDetector to detect lines in different octaves of images, then we need to allocate and release\n *memory for gradient images (dxImg, dyImg, gImg) repeatedly for their varying size*/\nstd::vector<Ptr<EDLineDetector> > edLineVec_;\n\n/* Sobel's derivatives */\nstd::vector<cv::Mat> dxImg_vector, dyImg_vector;\n\n/* Gaussian pyramid */\nstd::vector<cv::Mat> octaveImages;\n\n};\n\n/**\nLines extraction methodology\n----------------------------\n\nThe lines extraction methodology described in the following is mainly based on @cite EDL . The\nextraction starts with a Gaussian pyramid generated from an original image, downsampled N-1 times,\nblurred N times, to obtain N layers (one for each octave), with layer 0 corresponding to the input\nimage. Then, from each layer (octave) in the pyramid, lines are extracted using the LSD algorithm.\n\nDifferently from the EDLine extractor used in the original article, LSD furnishes information only\nabout a line's extremes; thus, additional information regarding a line's slope and equation is computed\nvia analytic methods. The number of pixels is obtained using *LineIterator*. Extracted lines are\nreturned in the form of KeyLine objects, but since extraction is based on a method different from\nthe one used in the *BinaryDescriptor* class, the data associated with a line's extremes in the original image and\nin the octave it was extracted from coincide. KeyLine's field *class_id* is used as an index to\nindicate the order of extraction of a line inside a single octave.\n*/\nclass CV_EXPORTS LSDDetector : public Algorithm\n{\npublic:\n\n/* constructor */\n/*CV_WRAP*/\nLSDDetector()\n{\n}\n;\n\n/** @brief Creates an LSDDetector object, using smart pointers.\n */\nstatic Ptr<LSDDetector> createLSDDetector();\n\n/** @brief Detect lines inside an image.\n\n@param image input image\n@param keypoints vector that will store extracted lines for one or more images\n@param scale scale factor used in pyramids generation\n@param numOctaves number of octaves inside pyramid\n@param mask mask matrix to detect only KeyLines of interest\n */\nvoid detect( const Mat& image, CV_OUT std::vector<KeyLine>& keypoints, int scale, int numOctaves, const Mat& mask = Mat() );\n\n/** @overload\n@param images input images\n@param keylines set of vectors that will store extracted lines for one or more images\n@param scale scale factor used in pyramids generation\n@param numOctaves number of octaves inside pyramid\n@param masks vector of mask matrices to detect only KeyLines of interest from each input image\n*/\nvoid detect( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, int scale, int numOctaves,\nconst std::vector<Mat>& masks = std::vector<Mat>() ) const;\n\nprivate:\n/* compute Gaussian pyramid of input image */\nvoid computeGaussianPyramid( const Mat& image, int numOctaves, int scale );\n\n/* implementation of line detection */\nvoid detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, int numOctaves, int scale, const Mat& mask ) const;\n\n/* matrices for Gaussian pyramids */\nstd::vector<cv::Mat> gaussianPyrs;\n};\n\n/** @brief Furnishes all functionalities for querying a dataset provided by the user or internal to the\nclass (which the user must, in any case, populate) on the model of @ref features2d_match\n\n\nOnce descriptors have been extracted from an image (whether they represent lines or points), it\nbecomes interesting to be able to match a descriptor with another one extracted from a different\nimage and representing the same line or 
point, seen from a different perspective or on a different\nscale. In reaching such a goal, the main headache is designing an efficient search algorithm to\nassociate a query descriptor to one extracted from a dataset. In the following, a matching modality\nbased on *Multi-Index Hashing (MiHashing)* will be described.\n\nMulti-Index Hashing\n-------------------\n\nThe theory described in this section is based on @cite MIH . Given a dataset populated with binary\ncodes, each code is indexed *m* times into *m* different hash tables, according to the *m* substrings it\nhas been divided into. Thus, given a query code, all the entries close to it in at least one\nsubstring are returned by the search as *neighbor candidates*. Returned entries are then checked for\nvalidity by verifying that their full codes are not distant (in Hamming space) more than *r* bits\nfrom the query code. In detail, each binary code **h** composed of *b* bits is divided into *m*\ndisjoint substrings \\f$\\mathbf{h}^{(1)}, ..., \\mathbf{h}^{(m)}\\f$, each with length\n\\f$\\lfloor b/m \\rfloor\\f$ or \\f$\\lceil b/m \\rceil\\f$ bits. Formally, when two codes **h** and **g** differ\nby at most *r* bits, in at least one of their *m* substrings they differ by at most\n\\f$\\lfloor r/m \\rfloor\\f$ bits. In particular, when \\f$||\\mathbf{h}-\\mathbf{g}||_H \\le r\\f$ (where \\f$||.||_H\\f$\nis the Hamming norm), there must exist a substring *k* (with \\f$1 \\le k \\le m\\f$) such that\n\n\\f[||\\mathbf{h}^{(k)} - \\mathbf{g}^{(k)}||_H \\le \\left\\lfloor \\frac{r}{m} \\right\\rfloor .\\f]\n\nThat means that if the Hamming distance between each of the *m* substrings is strictly greater than\n\\f$\\lfloor r/m \\rfloor\\f$, then \\f$||\\mathbf{h}-\\mathbf{g}||_H\\f$ must be larger than *r*, and that is a\ncontradiction. If the codes in the dataset are divided into *m* substrings, then *m* tables will be\nbuilt. Given a query **q** with substrings \\f$\\{\\mathbf{q}^{(i)}\\}^m_{i=1}\\f$, the *i*-th hash table is\nsearched for entries distant at most \\f$\\lfloor r/m \\rfloor\\f$ from \\f$\\mathbf{q}^{(i)}\\f$, and a set of\ncandidates \\f$\\mathcal{N}_i(\\mathbf{q})\\f$ is obtained. The union of sets\n\\f$\\mathcal{N}(\\mathbf{q}) = \\bigcup_i \\mathcal{N}_i(\\mathbf{q})\\f$ is a superset of the *r*-neighbors\nof **q**. 
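As a concrete check of this bound (the split is purely illustrative; this matcher manages 256-bit codes):\nwith \\f$b = 256\\f$, \\f$m = 16\\f$ substrings of 16 bits each and a search radius \\f$r = 24\\f$, any code within\n24 bits of the query must agree with it within \\f$\\lfloor 24/16 \\rfloor = 1\\f$ bit in at least one\nsubstring, so probing each of the 16 tables with a 1-bit Hamming ball cannot miss it.\n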
Then, the last step of the algorithm is computing the Hamming distance between **q** and each\nelement in \\f$\\mathcal{N}(\\mathbf{q})\\f$, deleting the codes that are distant more than *r* from **q**.\n*/\nclass CV_EXPORTS BinaryDescriptorMatcher : public Algorithm\n{\n\npublic:\n/** @brief For every input query descriptor, retrieve the best matching one from a dataset provided by the user\nor from the one internal to the class\n\n@param queryDescriptors query descriptors\n@param trainDescriptors dataset of descriptors furnished by user\n@param matches vector to host retrieved matches\n@param mask mask to select which input descriptors must be matched to one in dataset\n */\nvoid match( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector<DMatch>& matches, const Mat& mask = Mat() ) const;\n\n/** @overload\n@param queryDescriptors query descriptors\n@param matches vector to host retrieved matches\n@param masks vector of masks to select which input descriptors must be matched to one in dataset\n(the *i*-th mask in vector indicates whether each input query can be matched with descriptors in\ndataset relative to *i*-th image)\n*/\nvoid match( const Mat& queryDescriptors, std::vector<DMatch>& matches, const std::vector<Mat>& masks = std::vector<Mat>() );\n\n/** @brief For every input query descriptor, retrieve the best *k* matching ones from a dataset provided by\nthe user or from the one internal to the class\n\n@param queryDescriptors query descriptors\n@param trainDescriptors dataset of descriptors furnished by user\n@param matches vector to host retrieved matches\n@param k number of the closest descriptors to be returned for every input query\n@param mask mask to select which input descriptors must be matched to ones in dataset\n@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any\nmatches for a given query is not inserted in final result)\n */\nvoid knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector<std::vector<DMatch> >& matches, int k, const Mat& mask = Mat(),\nbool compactResult = false ) const;\n\n/** @overload\n@param queryDescriptors query descriptors\n@param matches vector to host retrieved matches\n@param k number of the closest descriptors to be returned for every input query\n@param masks vector of masks to select which input descriptors must be matched to ones in dataset\n(the *i*-th mask in vector indicates whether each input query can be matched with descriptors in\ndataset relative to *i*-th image)\n@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any\nmatches for a given query is not inserted in final result)\n*/\nvoid knnMatch( const Mat& queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k, const std::vector<Mat>& masks = std::vector<Mat>(),\nbool compactResult = false );\n\n/** @brief For every input query descriptor, retrieve, from a dataset provided by the user or from the one\ninternal to the class, all the descriptors that are not further than *maxDist* from the input query\n\n@param queryDescriptors query descriptors\n@param trainDescriptors dataset of descriptors furnished by user\n@param matches vector to host retrieved matches\n@param maxDistance search radius\n@param mask mask to select which input descriptors must be matched to ones in dataset\n@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any\nmatches for a given query is not inserted in final result)\n */\nvoid radiusMatch( const Mat& 
queryDescriptors, const Mat& trainDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,\nconst Mat& mask = Mat(), bool compactResult = false ) const;\n\n/** @overload\n@param queryDescriptors query descriptors\n@param matches vector to host retrieved matches\n@param maxDistance search radius\n@param masks vector of masks to select which input descriptors must be matched to ones in dataset\n(the *i*-th mask in vector indicates whether each input query can be matched with descriptors in\ndataset relative to *i*-th image)\n@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any\nmatches for a given query is not inserted in final result)\n*/\nvoid radiusMatch( const Mat& queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance, const std::vector<Mat>& masks =\nstd::vector<Mat>(),\nbool compactResult = false );\n\n/** @brief Store locally new descriptors to be inserted in dataset, without updating dataset.\n\n@param descriptors matrices containing descriptors to be inserted into dataset\n\n@note Each matrix *i* in **descriptors** should contain descriptors relative to lines extracted from\n*i*-th image.\n */\nvoid add( const std::vector<Mat>& descriptors );\n\n/** @brief Update dataset by inserting into it all descriptors that were stored locally by *add* function.\n\n@note Every time this function is invoked, current dataset is deleted and locally stored descriptors\nare inserted into dataset. The locally stored copy of just inserted descriptors is then removed.\n */\nvoid train();\n\n/** @brief Create a BinaryDescriptorMatcher object and return a smart pointer to it.\n */\nstatic Ptr<BinaryDescriptorMatcher> createBinaryDescriptorMatcher();\n\n/** @brief Clear dataset and internal data\n */\nvoid clear();\n\n/** @brief Constructor.\n\nThe BinaryDescriptorMatcher constructed is able to store and manage 256-bits long entries.\n */\nBinaryDescriptorMatcher();\n\n/** destructor */\n~BinaryDescriptorMatcher()\n{\n}\n\nprivate:\nclass BucketGroup\n{\n\npublic:\n/** constructor */\nBucketGroup();\n\n/** destructor */\n~BucketGroup();\n\n/** insert data into the bucket */\nvoid insert( int subindex, UINT32 data );\n\n/** perform a query to the bucket */\nUINT32* query( int subindex, int *size );\n\n/** utility functions */\nvoid insert_value( std::vector<uint32_t>& vec, int index, UINT32 data );\nvoid push_value( std::vector<uint32_t>& vec, UINT32 Data );\n\n/** data fields */\nUINT32 empty;\nstd::vector<uint32_t> group;\n\n\n};\n\nclass SparseHashtable\n{\n\nprivate:\n\n/** Maximum bits per key before folding the table */\nstatic const int MAX_B;\n\n/** Bins (each bin is an Array object for duplicates of the same key) */\nBucketGroup *table;\n\npublic:\n\n/** constructor */\nSparseHashtable();\n\n/** destructor */\n~SparseHashtable();\n\n/** initializer */\nint init( int _b );\n\n/** insert data */\nvoid insert( UINT64 index, UINT32 data );\n\n/** query data */\nUINT32* query( UINT64 index, int* size );\n\n/** Bits per index */\nint b;\n\n/**  Number of bins */\nUINT64 size;\n\n};\n\n/** class defining a sequence of bits */\nclass bitarray\n{\n\npublic:\n/** pointer to bits sequence and sequence's length */\nUINT32 *arr;\nUINT32 length;\n\n/** constructor setting default values */\nbitarray()\n{\narr = NULL;\nlength = 0;\n}\n\n/** constructor setting sequence's length */\nbitarray( UINT64 _bits )\n{\ninit( _bits );\n}\n\n/** initializer of private fields */\nvoid init( UINT64 _bits )\n{\nlength = (UINT32) ceil( 
_bits / 32.00 );\narr = new UINT32[length];\nerase();\n}\n\n/** destructor */\n~bitarray()\n{\nif( arr )\ndelete[] arr;\n}\n\ninline void flip( UINT64 index )\n{\narr[index >> 5] ^= ( (UINT32) 0x01 ) << ( index % 32 );\n}\n\ninline void set( UINT64 index )\n{\narr[index >> 5] |= ( (UINT32) 0x01 ) << ( index % 32 );\n}\n\ninline UINT8 get( UINT64 index )\n{\nreturn ( arr[index >> 5] & ( ( (UINT32) 0x01 ) << ( index % 32 ) ) ) != 0;\n}\n\n/** set all bits of the sequence to zero */\ninline void erase()\n{\nmemset( arr, 0, sizeof(UINT32) * length );\n}\n\n};\n\nclass Mihasher\n{\n\npublic:\n/** Bits per code */\nint B;\n\n/** B/8 */\nint B_over_8;\n\n/** Bits per chunk (must be less than 64) */\nint b;\n\n/** Number of chunks */\nint m;\n\n/** Number of chunks with b bits (have 1 bit more than others) */\nint mplus;\n\n/** Maximum Hamming search radius (we use B/2 by default) */\nint D;\n\n/** Maximum Hamming search radius per substring */\nint d;\n\n/** Maximum results to return */\nint K;\n\n/** Number of codes */\nUINT64 N;\n\n/** Table of original full-length codes */\ncv::Mat codes;\n\n/** Counter for eliminating duplicate results (it is not thread safe) */\nbitarray *counter;\n\n/** Array of m hashtables */\nSparseHashtable *H;\n\n/** Volume of a b-bit Hamming ball with radius s (for s = 0 to d) */\nUINT32 *xornum;\n\n/** Used within generation of binary codes at a certain Hamming distance */\nint power[100];\n\n/** constructor */\nMihasher();\n\n/** destructor */\n~Mihasher();\n\n/** constructor 2 */\nMihasher( int B, int m );\n\n/** K setter */\nvoid setK( int K );\n\n/** populate tables */\nvoid populate( cv::Mat & codes, UINT32 N, int dim1codes );\n\n/** execute a batch query */\nvoid batchquery( UINT32 * results, UINT32 *numres/*, qstat *stats*/, const cv::Mat & q, UINT32 numq, int dim1queries );\n\nprivate:\n\n/** execute a single query */\nvoid query( UINT32 * results, UINT32* numres/*, qstat *stats*/, UINT8 *q, UINT64 * chunks, UINT32 * res );\n};\n\n/** retrieve Hamming distances */\nvoid checkKDistances( UINT32 * numres, int k, std::vector<int>& k_distances, int row, int string_length ) const;\n\n/** matrix to store new descriptors */\nMat descriptorsMat;\n\n/** map storing where each bunch of descriptors begins in DS */\nstd::map<int, int> indexesMap;\n\n/** internal MiHasher representing the dataset */\nMihasher* dataset;\n\n/** index from which next added descriptors' bunch must begin */\nint nextAddedIndex;\n\n/** number of images whose descriptors are stored in DS */\nint numImages;\n\n/** number of descriptors in dataset */\nint descrInDS;\n\n};\n\n/* --------------------------------------------------------------------------------------------\n UTILITY FUNCTIONS\n -------------------------------------------------------------------------------------------- */\n\n/** struct for drawing options */\nstruct CV_EXPORTS DrawLinesMatchesFlags\n{\nenum\n{\nDEFAULT = 0,  //!< Output image matrix will be created (Mat::create),\n              //!< i.e. existing memory of output image may be reused.\n              //!< Two source images, matches, and single keylines\n              //!< will be drawn.\nDRAW_OVER_OUTIMG = 1,//!< Output image matrix will not be\n//!< created (using Mat::create). 
Matches will be drawn\n//!< on existing content of output image.\nNOT_DRAW_SINGLE_LINES = 2//!< Single keylines will not be drawn.\n};\n};\n\n/** @brief Draws the found matches of keylines from two images.\n\n@param img1 first image\n@param keylines1 keylines extracted from first image\n@param img2 second image\n@param keylines2 keylines extracted from second image\n@param matches1to2 vector of matches\n@param outImg output matrix to draw on\n@param matchColor drawing color for matches (chosen randomly in case of default value)\n@param singleLineColor drawing color for keylines (chosen randomly in case of default value)\n@param matchesMask mask to indicate which matches must be drawn\n@param flags drawing flags, see DrawLinesMatchesFlags\n\n@note If both *matchColor* and *singleLineColor* are set to their default values, the function draws\nmatched lines and the lines connecting them with the same color\n */\nCV_EXPORTS void drawLineMatches( const Mat& img1, const std::vector<KeyLine>& keylines1, const Mat& img2, const std::vector<KeyLine>& keylines2,\n                                 const std::vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor = Scalar::all( -1 ),\n                                 const Scalar& singleLineColor = Scalar::all( -1 ), const std::vector<char>& matchesMask = std::vector<char>(),\n                                 int flags = DrawLinesMatchesFlags::DEFAULT );\n\n/** @brief Draws keylines.\n\n@param image input image\n@param keylines keylines to be drawn\n@param outImage output image to draw on\n@param color color of lines to be drawn (if set to default value, color is chosen randomly)\n@param flags drawing flags\n */\nCV_EXPORTS void drawKeylines( const Mat& image, const std::vector<KeyLine>& keylines, Mat& outImage, const Scalar& color = Scalar::all( -1 ),\n                              int flags = DrawLinesMatchesFlags::DEFAULT );\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/line_descriptor.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_LINE_DESCRIPTOR_HPP__\n#define __OPENCV_LINE_DESCRIPTOR_HPP__\n\n#include \"opencv2/line_descriptor/descriptor.hpp\"\n\n/** @defgroup line_descriptor Binary descriptors for lines extracted from an image\n\nIntroduction\n------------\n\nOne of the most challenging activities in computer vision is the extraction of useful information\nfrom a given image. Such information usually comes in the form of points that preserve some kind of\nproperty (for instance, they are scale-invariant) and are actually representative of the input image.\n\nThe goal of this module is seeking a new kind of representative information inside an image and\nproviding the functionalities for its extraction and representation. In particular, differently from\nprevious methods for detection of relevant elements inside an image, lines are extracted in place of\npoints; a new class is defined ad hoc to summarize a line's properties, for reuse and plotting\npurposes.\n\nComputation of binary descriptors\n---------------------------------\n\nTo obtain a binary descriptor representing a certain line detected from a certain octave of an\nimage, we first compute a non-binary descriptor as described in @cite LBD . Such an algorithm works on\nlines extracted using the EDLine detector, as explained in @cite EDL . 
Given a line, we consider a\nrectangular region centered at it and called *line support region (LSR)*. Such region is divided\ninto a set of bands \\f$\\{B_1, B_2, ..., B_m\\}\\f$, whose length equals that of the line.\n\nIf we indicate with \\f$\\bf{d}_L\\f$ the direction of the line, the orthogonal and clockwise direction to the line\n\\f$\\bf{d}_{\\perp}\\f$ can be determined; these two directions are used to construct a reference frame\ncentered in the middle point of the line. The gradients of pixels \\f$\\bf{g'}\\f$ inside the LSR can be projected\nto the newly determined frame, obtaining their local equivalent\n\\f$\\bf{g'} = (\\bf{g}^T \\cdot \\bf{d}_{\\perp}, \\bf{g}^T \\cdot \\bf{d}_L)^T \\triangleq (\\bf{g'}_{d_{\\perp}}, \\bf{g'}_{d_L})^T\\f$.\n\nLater on, a Gaussian function is applied to all the LSR's pixels along the \\f$\\bf{d}_\\perp\\f$ direction; first,\nwe assign a global weighting coefficient \\f$f_g(i) = (1/\\sqrt{2\\pi}\\sigma_g)e^{-d^2_i/2\\sigma^2_g}\\f$ to\nthe *i*-th row in the LSR, where \\f$d_i\\f$ is the distance of the *i*-th row from the center row in the LSR,\n\\f$\\sigma_g = 0.5(m \\cdot w - 1)\\f$ and \\f$w\\f$ is the width of the bands (the same for every band). Secondly,\nconsidering a band \\f$B_j\\f$ and its neighbor bands \\f$B_{j-1}, B_{j+1}\\f$, we assign a local weighting\n\\f$f_l(k) = (1/\\sqrt{2\\pi}\\sigma_l)e^{-d'^2_k/2\\sigma_l^2}\\f$, where \\f$d'_k\\f$ is the distance of the *k*-th\nrow from the center row in \\f$B_j\\f$ and \\f$\\sigma_l = w\\f$. Using the global and local weights, we obtain,\nat the same time, a reduction of the role played by gradients far from the line and of boundary effects,\nrespectively.\n\nEach band \\f$B_j\\f$ in the LSR has an associated *band descriptor (BD)* which is computed considering\nthe previous and next bands (top and bottom bands are ignored when computing the descriptor for the first and\nlast bands). Once each band has been assigned its BD, the LBD descriptor of the line is simply given by\n\n\\f[LBD = (BD_1^T, BD_2^T, ... , BD^T_m)^T.\\f]\n\nTo compute a band descriptor \\f$B_j\\f$, each *k*-th row in it is considered and the gradients in such\nrow are accumulated:\n\n\\f[\\begin{matrix} \\bf{V1}^k_j = \\lambda \\sum\\limits_{\\bf{g}'_{d_\\perp}>0}\\bf{g}'_{d_\\perp}, &  \\bf{V2}^k_j = \\lambda \\sum\\limits_{\\bf{g}'_{d_\\perp}<0} -\\bf{g}'_{d_\\perp}, \\\\ \\bf{V3}^k_j = \\lambda \\sum\\limits_{\\bf{g}'_{d_L}>0}\\bf{g}'_{d_L}, & \\bf{V4}^k_j = \\lambda \\sum\\limits_{\\bf{g}'_{d_L}<0} -\\bf{g}'_{d_L}\\end{matrix}.\\f]\n\nwith \\f$\\lambda = f_g(k)f_l(k)\\f$.\n\nBy stacking the previous results, we obtain the *band description matrix (BDM)*\n\n\\f[BDM_j = \\left(\\begin{matrix} \\bf{V1}_j^1 & \\bf{V1}_j^2 & \\ldots & \\bf{V1}_j^n \\\\ \\bf{V2}_j^1 & \\bf{V2}_j^2 & \\ldots & \\bf{V2}_j^n \\\\ \\bf{V3}_j^1 & \\bf{V3}_j^2 & \\ldots & \\bf{V3}_j^n \\\\ \\bf{V4}_j^1 & \\bf{V4}_j^2 & \\ldots & \\bf{V4}_j^n \\end{matrix} \\right) \\in \\mathbb{R}^{4\\times n},\\f]\n\nwith \\f$n\\f$ the number of rows in band \\f$B_j\\f$:\n\n\\f[n = \\begin{cases} 2w, & j = 1||m; \\\\ 3w, & \\mbox{else}. \\end{cases}\\f]\n\nEach \\f$BD_j\\f$ can be obtained using the standard deviation vector \\f$S_j\\f$ and mean vector \\f$M_j\\f$ of\n\\f$BDM_j\\f$. Thus, finally:\n\n\\f[LBD = (M_1^T, S_1^T, M_2^T, S_2^T, \\ldots, M_m^T, S_m^T)^T \\in \\mathbb{R}^{8m}\\f]\n\nOnce the LBD has been obtained, it must be converted into a binary form. For such purpose, we\nconsider 32 possible pairs of BDs inside it; each couple of BDs is compared element by element, and each comparison\ngenerates an 8-bit string. 
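As a minimal sketch of one such comparison (illustrative only; the module's actual binaryConversion\nmay differ in details), the *k*-th bit of the output byte can be set whenever the *k*-th component of\nthe first BD exceeds that of the second:\n@code{.cpp}\n// illustrative helper, not the library implementation\nunsigned char compareBD( const float* f1, const float* f2 )\n{\n  unsigned char bits = 0;\n  for ( int k = 0; k < 8; k++ )\n    if( f1[k] > f2[k] )\n      bits |= (unsigned char) ( 1 << k );  // set bit k\n  return bits;\n}\n@endcode\n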
Concatenating 32 comparison strings, we get the 256-bit final binary\nrepresentation of a single LBD.\n*/\n\n#endif \n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/ml/ml.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/ml.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/ml.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Copyright (C) 2014, Itseez Inc, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_ML_HPP__\n#define __OPENCV_ML_HPP__\n\n#ifdef __cplusplus\n#  include \"opencv2/core.hpp\"\n#endif\n\n#ifdef __cplusplus\n\n#include <float.h>\n#include <map>\n#include <iostream>\n\n/**\n  @defgroup ml Machine Learning\n\n  The Machine Learning Library (MLL) is a set of classes and functions for statistical\n  classification, regression, and clustering of data.\n\n  Most of the classification and regression algorithms are implemented as C++ classes. As the\n  algorithms have different sets of features (like an ability to handle missing measurements or\n  categorical input variables), there is a little common ground between the classes. This common\n  ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from.\n\n  See detailed overview here: @ref ml_intro.\n */\n\nnamespace cv\n{\n\nnamespace ml\n{\n\n//! @addtogroup ml\n//! 
@{\n\n/** @brief Variable types */\nenum VariableTypes\n{\n    VAR_NUMERICAL    =0, //!< same as VAR_ORDERED\n    VAR_ORDERED      =0, //!< ordered variables\n    VAR_CATEGORICAL  =1  //!< categorical variables\n};\n\n/** @brief %Error types */\nenum ErrorTypes\n{\n    TEST_ERROR = 0,\n    TRAIN_ERROR = 1\n};\n\n/** @brief Sample types */\nenum SampleTypes\n{\n    ROW_SAMPLE = 0, //!< each training sample is a row of samples\n    COL_SAMPLE = 1  //!< each training sample occupies a column of samples\n};\n\n/** @brief The structure represents the logarithmic grid range of statmodel parameters.\n\nIt is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate\nbeing computed by cross-validation.\n */\nclass CV_EXPORTS ParamGrid\n{\npublic:\n    /** @brief Default constructor */\n    ParamGrid();\n    /** @brief Constructor with parameters */\n    ParamGrid(double _minVal, double _maxVal, double _logStep);\n\n    double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.\n    double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.\n    /** @brief Logarithmic step for iterating the statmodel parameter.\n\n    The grid determines the following iteration sequence of the statmodel parameter values:\n    \\f[(minVal, minVal*logStep, minVal*{logStep}^2, \\dots,  minVal*{logStep}^n),\\f]\n    where \\f$n\\f$ is the maximal index satisfying\n    \\f[\\texttt{minVal} * \\texttt{logStep} ^n <  \\texttt{maxVal}\\f]\n    The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.\n    */\n    double logStep;\n};\n\n/** @brief Class encapsulating training data.\n\nPlease note that the class only specifies the interface of training data, but not the implementation.\nAll the statistical model classes in the _ml_ module accept Ptr\\<TrainData\\> as a parameter. In other\nwords, you can create your own class derived from TrainData and pass a smart pointer to an instance\nof this class into StatModel::train.\n\n@sa @ref ml_intro_data\n */\nclass CV_EXPORTS_W TrainData\n{\npublic:\n    static inline float missingValue() { return FLT_MAX; }\n    virtual ~TrainData();\n\n    CV_WRAP virtual int getLayout() const = 0;\n    CV_WRAP virtual int getNTrainSamples() const = 0;\n    CV_WRAP virtual int getNTestSamples() const = 0;\n    CV_WRAP virtual int getNSamples() const = 0;\n    CV_WRAP virtual int getNVars() const = 0;\n    CV_WRAP virtual int getNAllVars() const = 0;\n\n    CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;\n    CV_WRAP virtual Mat getSamples() const = 0;\n    CV_WRAP virtual Mat getMissing() const = 0;\n\n    /** @brief Returns matrix of train samples\n\n    @param layout The requested layout. If it's different from the initial one, the matrix is\n        transposed. 
See ml::SampleTypes.\n    @param compressSamples if true, the function returns only the training samples (specified by\n        sampleIdx)\n    @param compressVars if true, the function returns the shorter training samples, containing only\n        the active variables.\n\n    In the current implementation the function tries to avoid physical data copying and returns the\n    matrix stored inside TrainData (unless the transposition or compression is needed).\n     */\n    CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE,\n                                bool compressSamples=true,\n                                bool compressVars=true) const = 0;\n\n    /** @brief Returns the vector of responses\n\n    The function returns the ordered or the original categorical responses. Usually it's used in\n    regression algorithms.\n     */\n    CV_WRAP virtual Mat getTrainResponses() const = 0;\n\n    /** @brief Returns the vector of normalized categorical responses\n\n    The function returns the vector of responses. Each response is an integer from `0` to `<number of\n    classes>-1`. The actual label value can then be retrieved from the class label vector, see\n    TrainData::getClassLabels.\n     */\n    CV_WRAP virtual Mat getTrainNormCatResponses() const = 0;\n    CV_WRAP virtual Mat getTestResponses() const = 0;\n    CV_WRAP virtual Mat getTestNormCatResponses() const = 0;\n    CV_WRAP virtual Mat getResponses() const = 0;\n    CV_WRAP virtual Mat getNormCatResponses() const = 0;\n    CV_WRAP virtual Mat getSampleWeights() const = 0;\n    CV_WRAP virtual Mat getTrainSampleWeights() const = 0;\n    CV_WRAP virtual Mat getTestSampleWeights() const = 0;\n    CV_WRAP virtual Mat getVarIdx() const = 0;\n    CV_WRAP virtual Mat getVarType() const = 0;\n    CV_WRAP virtual int getResponseType() const = 0;\n    CV_WRAP virtual Mat getTrainSampleIdx() const = 0;\n    CV_WRAP virtual Mat getTestSampleIdx() const = 0;\n    CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0;\n    virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0;\n    CV_WRAP virtual Mat getDefaultSubstValues() const = 0;\n\n    CV_WRAP virtual int getCatCount(int vi) const = 0;\n\n    /** @brief Returns the vector of class labels\n\n    The function returns the vector of unique labels occurring in the responses.\n     */\n    CV_WRAP virtual Mat getClassLabels() const = 0;\n\n    CV_WRAP virtual Mat getCatOfs() const = 0;\n    CV_WRAP virtual Mat getCatMap() const = 0;\n\n    /** @brief Splits the training data into the training and test parts\n    @sa TrainData::setTrainTestSplitRatio\n     */\n    CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;\n\n    /** @brief Splits the training data into the training and test parts\n\n    The function selects a subset of specified relative size and then returns it as the training\n    set. If the function is not called, all the data is used for training. 
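A minimal usage sketch (the CSV file name is hypothetical):\n    @code{.cpp}\n    Ptr<TrainData> data = TrainData::loadFromCSV(\"dataset.csv\", 1); // skip one header line\n    data->setTrainTestSplitRatio(0.8, true);       // 80% train, shuffled\n    Mat trainSamples  = data->getTrainSamples();   // samples of the training subset\n    Mat testResponses = data->getTestResponses();  // responses of the held-out 20%\n    @endcode\n    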
Please note that for\n    each of TrainData::getTrain\\* there is a corresponding TrainData::getTest\\*, so that the test\n    subset can be retrieved and processed as well.\n    @sa TrainData::setTrainTestSplit\n     */\n    CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;\n    CV_WRAP virtual void shuffleTrainTest() = 0;\n\n    CV_WRAP static Mat getSubVector(const Mat& vec, const Mat& idx);\n\n    /** @brief Reads the dataset from a .csv file and returns the ready-to-use training data.\n\n    @param filename The input file name\n    @param headerLineCount The number of lines in the beginning to skip; besides the header, the\n        function also skips empty lines and lines starting with `#`\n    @param responseStartIdx Index of the first output variable. If -1, the function considers the\n        last variable as the response\n    @param responseEndIdx Index of the last output variable + 1. If -1, then there is a single\n        response variable at responseStartIdx.\n    @param varTypeSpec The optional text string that specifies the variables' types. It has the\n        format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2`\n        (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are\n        considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]`\n        should cover all the variables. If varTypeSpec is not specified, then the algorithm uses the\n        following rules:\n        - all input variables are considered ordered by default. If some column contains non-\n          numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding\n          variable is considered categorical.\n        - if there are several output variables, they are all considered as ordered. An error is\n          reported when non-numerical values are used.\n        - if there is a single output variable, then if its values are non-numerical or are all\n          integers, then it's considered categorical. Otherwise, it's considered ordered.\n    @param delimiter The character used to separate values in each line.\n    @param missch The character used to specify missing measurements. It should not be a digit.\n        Although it's a non-numerical value, it surely does not affect the decision of whether the\n        variable is ordered or categorical.\n    @note If the dataset only contains input variables and no responses, use responseStartIdx = -2\n        and responseEndIdx = 0. The output variables vector will just contain zeros.\n     */\n    static Ptr<TrainData> loadFromCSV(const String& filename,\n                                      int headerLineCount,\n                                      int responseStartIdx=-1,\n                                      int responseEndIdx=-1,\n                                      const String& varTypeSpec=String(),\n                                      char delimiter=',',\n                                      char missch='?');\n\n    /** @brief Creates training data from in-memory arrays.\n\n    @param samples matrix of samples. It should have CV_32F type.\n    @param layout see ml::SampleTypes.\n    @param responses matrix of responses. If the responses are scalar, they should be stored as a\n        single row or as a single column. 
The matrix should have type CV_32F or CV_32S (in the\n        former case the responses are considered as ordered by default; in the latter case - as\n        categorical)\n    @param varIdx vector specifying which variables to use for training. It can be an integer vector\n        (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of\n        active variables.\n    @param sampleIdx vector specifying which samples to use for training. It can be an integer\n        vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask\n        of training samples.\n    @param sampleWeights optional vector with weights for each sample. It should have CV_32F type.\n    @param varType optional vector of type CV_8U and size `<number_of_variables_in_samples> +\n        <number_of_variables_in_responses>`, containing types of each input and output variable. See\n        ml::VariableTypes.\n     */\n    CV_WRAP static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,\n                                 InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),\n                                 InputArray sampleWeights=noArray(), InputArray varType=noArray());\n};\n\n/** @brief Base class for statistical models in OpenCV ML.\n */\nclass CV_EXPORTS_W StatModel : public Algorithm\n{\npublic:\n    /** Predict options */\n    enum Flags {\n        UPDATE_MODEL = 1,\n        RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label\n        COMPRESSED_INPUT=2,\n        PREPROCESSED_INPUT=4\n    };\n\n    /** @brief Returns the number of variables in training samples */\n    CV_WRAP virtual int getVarCount() const = 0;\n\n    CV_WRAP virtual bool empty() const;\n\n    /** @brief Returns true if the model is trained */\n    CV_WRAP virtual bool isTrained() const = 0;\n    /** @brief Returns true if the model is a classifier */\n    CV_WRAP virtual bool isClassifier() const = 0;\n\n    /** @brief Trains the statistical model\n\n    @param trainData training data that can be loaded from file using TrainData::loadFromCSV or\n        created with TrainData::create.\n    @param flags optional flags, depending on the model. Some of the models can be updated with the\n        new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).\n     */\n    CV_WRAP virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );\n\n    /** @brief Trains the statistical model\n\n    @param samples training samples\n    @param layout See ml::SampleTypes.\n    @param responses vector of responses associated with the training samples.\n    */\n    CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses );\n\n    /** @brief Computes error on the training or test dataset\n\n    @param data the training data\n    @param test if true, the error is computed over the test subset of the data, otherwise it's\n        computed over the training subset of the data. Please note that if you loaded a completely\n        different dataset to evaluate an already trained classifier, you will probably not want to set\n        the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so\n        that the error is computed for the whole new set. Yes, this sounds a bit confusing.\n    @param resp the optional output responses.\n\n    The method uses StatModel::predict to compute the error. 
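A minimal sketch (the variables `model` and `data` are assumed to exist, with a split already set):\n    @code{.cpp}\n    data->setTrainTestSplitRatio(0.8, true);                    // hold out 20% as the test subset\n    float testError = model->calcError(data, true, noArray()); // test=true: error on the held-out part\n    @endcode\n    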
For regression models the error is\n    computed as RMS, for classifiers - as a percent of misclassified samples (0%-100%).\n     */\n    CV_WRAP virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;\n\n    /** @brief Predicts response(s) for the provided sample(s)\n\n    @param samples The input samples, floating-point matrix\n    @param results The optional output matrix of results.\n    @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.\n     */\n    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;\n\n    /** @brief Create and train model with default parameters\n\n    The class must implement static `create()` method with no parameters or with all default parameter values\n    */\n    template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, int flags=0)\n    {\n        Ptr<_Tp> model = _Tp::create();\n        return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>();\n    }\n};\n\n/****************************************************************************************\\\n*                                 Normal Bayes Classifier                                *\n\\****************************************************************************************/\n\n/** @brief Bayes classifier for normally distributed data.\n\n@sa @ref ml_intro_bayes\n */\nclass CV_EXPORTS_W NormalBayesClassifier : public StatModel\n{\npublic:\n    /** @brief Predicts the response for sample(s).\n\n    The method estimates the most probable classes for input vectors. Input vectors (one or more)\n    are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one\n    output vector outputs. The predicted class for a single input vector is returned by the method.\n    The vector outputProbs contains the output probabilities corresponding to each element of\n    result.\n     */\n    CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs,\n                               OutputArray outputProbs, int flags=0 ) const = 0;\n\n    /** Creates empty model\n    Use StatModel::train to train the model after creation. */\n    CV_WRAP static Ptr<NormalBayesClassifier> create();\n};\n\n/****************************************************************************************\\\n*                          K-Nearest Neighbour Classifier                                *\n\\****************************************************************************************/\n\n/** @brief The class implements the K-Nearest Neighbors model\n\n@sa @ref ml_intro_knn\n */\nclass CV_EXPORTS_W KNearest : public StatModel\n{\npublic:\n\n    /** Default number of neighbors to use in predict method. */\n    /** @see setDefaultK */\n    CV_WRAP virtual int getDefaultK() const = 0;\n    /** @copybrief getDefaultK @see getDefaultK */\n    CV_WRAP virtual void setDefaultK(int val) = 0;\n\n    /** Whether classification or regression model should be trained. */\n    /** @see setIsClassifier */\n    CV_WRAP virtual bool getIsClassifier() const = 0;\n    /** @copybrief getIsClassifier @see getIsClassifier */\n    CV_WRAP virtual void setIsClassifier(bool val) = 0;\n\n    /** Parameter for KDTree implementation. */\n    /** @see setEmax */\n    CV_WRAP virtual int getEmax() const = 0;\n    /** @copybrief getEmax @see getEmax */\n    CV_WRAP virtual void setEmax(int val) = 0;\n\n    /** %Algorithm type, one of KNearest::Types. 
*/\n    /** @see setAlgorithmType */\n    CV_WRAP virtual int getAlgorithmType() const = 0;\n    /** @copybrief getAlgorithmType @see getAlgorithmType */\n    CV_WRAP virtual void setAlgorithmType(int val) = 0;\n\n    /** @brief Finds the neighbors and predicts responses for input vectors.\n\n    @param samples Input samples stored by rows. It is a single-precision floating-point matrix of\n        `<number_of_samples> * k` size.\n    @param k Number of used nearest neighbors. Should be greater than 1.\n    @param results Vector with results of prediction (regression or classification) for each input\n        sample. It is a single-precision floating-point vector with `<number_of_samples>` elements.\n    @param neighborResponses Optional output values for corresponding neighbors. It is a single-\n        precision floating-point matrix of `<number_of_samples> * k` size.\n    @param dist Optional output distances from the input vectors to the corresponding neighbors. It\n        is a single-precision floating-point matrix of `<number_of_samples> * k` size.\n\n    For each input vector (a row of the matrix samples), the method finds the k nearest neighbors.\n    In case of regression, the predicted result is a mean value of the particular vector's neighbor\n    responses. In case of classification, the class is determined by voting.\n\n    For each input vector, the neighbors are sorted by their distances to the vector.\n\n    In case of the C++ interface you can use output pointers to empty matrices and the function will\n    allocate memory itself.\n\n    If only a single input vector is passed, all output matrices are optional and the predicted\n    value is returned by the method.\n\n    The function is parallelized with the TBB library.\n     */\n    CV_WRAP virtual float findNearest( InputArray samples, int k,\n                               OutputArray results,\n                               OutputArray neighborResponses=noArray(),\n                               OutputArray dist=noArray() ) const = 0;\n\n    /** @brief Implementations of KNearest algorithm\n       */\n    enum Types\n    {\n        BRUTE_FORCE=1,\n        KDTREE=2\n    };\n\n    /** @brief Creates the empty model\n\n    The static method creates an empty %KNearest classifier. It should then be trained using the StatModel::train method.\n     */\n    CV_WRAP static Ptr<KNearest> create();\n};\n\n/****************************************************************************************\\\n*                                   Support Vector Machines                              *\n\\****************************************************************************************/\n\n/** @brief Support Vector Machines.\n\n@sa @ref ml_intro_svm\n */\nclass CV_EXPORTS_W SVM : public StatModel\n{\npublic:\n\n    class CV_EXPORTS Kernel : public Algorithm\n    {\n    public:\n        virtual int getType() const = 0;\n        virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0;\n    };\n\n    /** Type of a %SVM formulation.\n    See SVM::Types. Default value is SVM::C_SVC. */\n    /** @see setType */\n    CV_WRAP virtual int getType() const = 0;\n    /** @copybrief getType @see getType */\n    CV_WRAP virtual void setType(int val) = 0;\n\n    /** Parameter \\f$\\gamma\\f$ of a kernel function.\n    For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. 
*/\n    /** @see setGamma */\n    CV_WRAP virtual double getGamma() const = 0;\n    /** @copybrief getGamma @see getGamma */\n    CV_WRAP virtual void setGamma(double val) = 0;\n\n    /** Parameter _coef0_ of a kernel function.\n    For SVM::POLY or SVM::SIGMOID. Default value is 0.*/\n    /** @see setCoef0 */\n    CV_WRAP virtual double getCoef0() const = 0;\n    /** @copybrief getCoef0 @see getCoef0 */\n    CV_WRAP virtual void setCoef0(double val) = 0;\n\n    /** Parameter _degree_ of a kernel function.\n    For SVM::POLY. Default value is 0. */\n    /** @see setDegree */\n    CV_WRAP virtual double getDegree() const = 0;\n    /** @copybrief getDegree @see getDegree */\n    CV_WRAP virtual void setDegree(double val) = 0;\n\n    /** Parameter _C_ of a %SVM optimization problem.\n    For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */\n    /** @see setC */\n    CV_WRAP virtual double getC() const = 0;\n    /** @copybrief getC @see getC */\n    CV_WRAP virtual void setC(double val) = 0;\n\n    /** Parameter \\f$\\nu\\f$ of a %SVM optimization problem.\n    For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */\n    /** @see setNu */\n    CV_WRAP virtual double getNu() const = 0;\n    /** @copybrief getNu @see getNu */\n    CV_WRAP virtual void setNu(double val) = 0;\n\n    /** Parameter \\f$\\epsilon\\f$ of a %SVM optimization problem.\n    For SVM::EPS_SVR. Default value is 0. */\n    /** @see setP */\n    CV_WRAP virtual double getP() const = 0;\n    /** @copybrief getP @see getP */\n    CV_WRAP virtual void setP(double val) = 0;\n\n    /** Optional weights in the SVM::C_SVC problem, assigned to particular classes.\n    They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus\n    these weights affect the misclassification penalty for different classes. The larger the weight,\n    the larger the penalty on misclassification of data from the corresponding class. Default value is\n    empty Mat. */\n    /** @see setClassWeights */\n    CV_WRAP virtual cv::Mat getClassWeights() const = 0;\n    /** @copybrief getClassWeights @see getClassWeights */\n    CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0;\n\n    /** Termination criteria of the iterative %SVM training procedure which solves a partial\n    case of a constrained quadratic optimization problem.\n    You can specify tolerance and/or the maximum number of iterations. Default value is\n    `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */\n    /** @see setTermCriteria */\n    CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0;\n    /** @copybrief getTermCriteria @see getTermCriteria */\n    CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;\n\n    /** Type of a %SVM kernel.\n    See SVM::KernelTypes. Default value is SVM::RBF. */\n    CV_WRAP virtual int getKernelType() const = 0;\n\n    /** Initialize with one of predefined kernels.\n    See SVM::KernelTypes. */\n    CV_WRAP virtual void setKernel(int kernelType) = 0;\n\n    /** Initialize with custom kernel.\n    See SVM::Kernel class for implementation details */\n    virtual void setCustomKernel(const Ptr<Kernel> &_kernel) = 0;\n\n    //! %SVM type\n    enum Types {\n        /** C-Support Vector Classification. n-class classification (n \\f$\\geq\\f$ 2), allows\n        imperfect separation of classes with penalty multiplier C for outliers. */\n        C_SVC=100,\n        /** \\f$\\nu\\f$-Support Vector Classification. 
n-class classification with possible\n        imperfect separation. Parameter \\f$\\nu\\f$ (in the range 0..1, the larger the value, the smoother\n        the decision boundary) is used instead of C. */\n        NU_SVC=101,\n        /** Distribution Estimation (One-class %SVM). All the training data are from\n        the same class; %SVM builds a boundary that separates the class from the rest of the feature\n        space. */\n        ONE_CLASS=102,\n        /** \\f$\\epsilon\\f$-Support Vector Regression. The distance between feature vectors\n        from the training set and the fitting hyper-plane must be less than p. For outliers the\n        penalty multiplier C is used. */\n        EPS_SVR=103,\n        /** \\f$\\nu\\f$-Support Vector Regression. \\f$\\nu\\f$ is used instead of p.\n        See @cite LibSVM for details. */\n        NU_SVR=104\n    };\n\n    /** @brief %SVM kernel type\n\n    A comparison of different kernels on the following 2D test case with four classes. Four\n    SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three\n    different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score.\n    Bright means max-score \\> 0, dark means max-score \\< 0.\n    ![image](pics/SVM_Comparison.png)\n    */\n    enum KernelTypes {\n        /** Returned by SVM::getKernelType in case when custom kernel has been set */\n        CUSTOM=-1,\n        /** Linear kernel. No mapping is done, linear discrimination (or regression) is\n        done in the original feature space. It is the fastest option. \\f$K(x_i, x_j) = x_i^T x_j\\f$. */\n        LINEAR=0,\n        /** Polynomial kernel:\n        \\f$K(x_i, x_j) = (\\gamma x_i^T x_j + coef0)^{degree}, \\gamma > 0\\f$. */\n        POLY=1,\n        /** Radial basis function (RBF), a good choice in most cases.\n        \\f$K(x_i, x_j) = e^{-\\gamma ||x_i - x_j||^2}, \\gamma > 0\\f$. */\n        RBF=2,\n        /** Sigmoid kernel: \\f$K(x_i, x_j) = \\tanh(\\gamma x_i^T x_j + coef0)\\f$. */\n        SIGMOID=3,\n        /** Exponential Chi2 kernel, similar to the RBF kernel:\n        \\f$K(x_i, x_j) = e^{-\\gamma \\chi^2(x_i,x_j)}, \\chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \\gamma > 0\\f$. */\n        CHI2=4,\n        /** Histogram intersection kernel. A fast kernel. \\f$K(x_i, x_j) = min(x_i,x_j)\\f$. */\n        INTER=5\n    };\n\n    //! %SVM params type\n    enum ParamTypes {\n        C=0,\n        GAMMA=1,\n        P=2,\n        NU=3,\n        COEF=4,\n        DEGREE=5\n    };\n\n    /** @brief Trains an %SVM with optimal parameters.\n\n    @param data the training data that can be constructed using TrainData::create or\n        TrainData::loadFromCSV.\n    @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One\n        subset is used to test the model, the others form the train set. So, the %SVM algorithm is\n        executed kFold times.\n    @param Cgrid grid for C\n    @param gammaGrid grid for gamma\n    @param pGrid grid for p\n    @param nuGrid grid for nu\n    @param coeffGrid grid for coeff\n    @param degreeGrid grid for degree\n    @param balanced If true and the problem is 2-class classification then the method creates more\n        balanced cross-validation subsets, that is, proportions between classes in the subsets are close\n        to the corresponding proportions in the whole train dataset.\n\n    The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,\n    nu, coef0, degree. 
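A hedged sketch of a typical invocation (the choices are illustrative; `data` is an existing Ptr<TrainData>):\n    @code{.cpp}\n    Ptr<SVM> svm = SVM::create();\n    svm->setType(SVM::C_SVC);\n    svm->setKernel(SVM::RBF);\n    svm->trainAuto(data, 10);  // 10-fold cross-validation over the default grids\n    @endcode\n    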
Parameters are considered optimal when the cross-validation\n    estimate of the test set error is minimal.\n\n    If there is no need to optimize a parameter, the corresponding grid step should be set to any\n    value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step\n    = 0`, and set `gammaGrid.minVal` and `gammaGrid.maxVal` to arbitrary numbers. In this case, the\n    value set by SVM::setGamma is taken for gamma.\n\n    And, finally, if the optimization of a parameter is required but the corresponding grid is\n    unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for\n    gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`.\n\n    This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the\n    regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and\n    the usual %SVM with the currently set parameters is executed.\n     */\n    virtual bool trainAuto( const Ptr<TrainData>& data, int kFold = 10,\n                    ParamGrid Cgrid = SVM::getDefaultGrid(SVM::C),\n                    ParamGrid gammaGrid  = SVM::getDefaultGrid(SVM::GAMMA),\n                    ParamGrid pGrid      = SVM::getDefaultGrid(SVM::P),\n                    ParamGrid nuGrid     = SVM::getDefaultGrid(SVM::NU),\n                    ParamGrid coeffGrid  = SVM::getDefaultGrid(SVM::COEF),\n                    ParamGrid degreeGrid = SVM::getDefaultGrid(SVM::DEGREE),\n                    bool balanced=false) = 0;\n\n    /** @brief Retrieves all the support vectors\n\n    The method returns all the support vectors as a floating-point matrix, where support vectors are\n    stored as matrix rows.\n     */\n    CV_WRAP virtual Mat getSupportVectors() const = 0;\n\n    /** @brief Retrieves all the uncompressed support vectors of a linear %SVM\n\n    The method returns all the uncompressed support vectors of a linear %SVM that the compressed\n    support vector, used for prediction, was derived from. They are returned in a floating-point\n    matrix, where the support vectors are stored as matrix rows.\n     */\n    CV_WRAP Mat getUncompressedSupportVectors() const;\n\n    /** @brief Retrieves the decision function\n\n    @param i the index of the decision function. If the problem solved is regression, 1-class or\n        2-class classification, then there will be just one decision function and the index should\n        always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$\n        decision functions.\n    @param alpha the optional output vector for weights, corresponding to different support vectors.\n        In the case of linear %SVM all the alpha's will be 1's.\n    @param svidx the optional output vector of indices of support vectors within the matrix of\n        support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear\n        %SVM each decision function consists of a single \"compressed\" support vector.\n\n    The method returns the rho parameter of the decision function, a scalar subtracted from the weighted\n    sum of kernel responses.\n     */\n    CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;\n\n    /** @brief Generates a grid for %SVM parameters.\n\n    @param param_id %SVM parameter ID, must be one of the SVM::ParamTypes. 
The grid is\n    generated for the parameter with this ID.\n\n    The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be\n    passed to the function SVM::trainAuto.\n     */\n    static ParamGrid getDefaultGrid( int param_id );\n\n    /** Creates empty model.\n    Use StatModel::train to train the model. Since %SVM has several parameters, you may want to\n    find the best parameters for your problem, which can be done with SVM::trainAuto. */\n    CV_WRAP static Ptr<SVM> create();\n};\n\n/****************************************************************************************\\\n*                              Expectation - Maximization                                *\n\\****************************************************************************************/\n\n/** @brief The class implements the Expectation Maximization algorithm.\n\n@sa @ref ml_intro_em\n */\nclass CV_EXPORTS_W EM : public StatModel\n{\npublic:\n    //! Type of covariance matrices\n    enum Types {\n        /** A scaled identity matrix \f$\mu_k * I\f$. Only the single\n        parameter \f$\mu_k\f$ has to be estimated for each matrix. The option may be used in special cases,\n        when the constraint is relevant, or as a first step in the optimization (for example in case\n        when the data is preprocessed with PCA). The results of such preliminary estimation may be\n        passed again to the optimization procedure, this time with\n        covMatType=EM::COV_MAT_DIAGONAL. */\n        COV_MAT_SPHERICAL=0,\n        /** A diagonal matrix with positive diagonal elements. The number of\n        free parameters is d for each matrix. This is the most commonly used option, yielding good\n        estimation results. */\n        COV_MAT_DIAGONAL=1,\n        /** A symmetric positive-definite matrix. The number of free\n        parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless\n        there is a fairly accurate initial estimation of the parameters and/or a huge number of\n        training samples. */\n        COV_MAT_GENERIC=2,\n        COV_MAT_DEFAULT=COV_MAT_DIAGONAL\n    };\n\n    //! Default parameters\n    enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};\n\n    //! The initial step\n    enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};\n\n    /** The number of mixture components in the Gaussian mixture model.\n    Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations could\n    determine the optimal number of mixtures within a specified value range, but that is not the\n    case in ML yet. */\n    /** @see setClustersNumber */\n    CV_WRAP virtual int getClustersNumber() const = 0;\n    /** @copybrief getClustersNumber @see getClustersNumber */\n    CV_WRAP virtual void setClustersNumber(int val) = 0;\n\n    /** Constraint on covariance matrices which defines the type of matrices.\n    See EM::Types. */\n    /** @see setCovarianceMatrixType */\n    CV_WRAP virtual int getCovarianceMatrixType() const = 0;\n    /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */\n    CV_WRAP virtual void setCovarianceMatrixType(int val) = 0;\n\n    /** The termination criteria of the %EM algorithm.\n    The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of\n    M-steps) or when the relative change of the likelihood logarithm is less than termCrit.epsilon.\n    Default maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. 
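\n\n    A minimal sketch of overriding these criteria (the cluster count and tolerance below are\n    illustrative values, not the defaults):\n    @code{.cpp}\n    Ptr<EM> em = EM::create();\n    em->setClustersNumber(3); // hypothetical mixture count\n    em->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 300, 1e-6));\n    @endcode\n    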
*/\n    /** @see setTermCriteria */\n    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;\n    /** @copybrief getTermCriteria @see getTermCriteria */\n    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;\n\n    /** @brief Returns weights of the mixtures\n\n    Returns a vector with the number of elements equal to the number of mixtures.\n     */\n    CV_WRAP virtual Mat getWeights() const = 0;\n    /** @brief Returns the cluster centers (means of the Gaussian mixture)\n\n    Returns a matrix with the number of rows equal to the number of mixtures and the number of columns\n    equal to the space dimensionality.\n     */\n    CV_WRAP virtual Mat getMeans() const = 0;\n    /** @brief Returns covariance matrices\n\n    Returns a vector of covariance matrices. The number of matrices is the number of Gaussian mixtures,\n    each matrix is a square floating-point matrix NxN, where N is the space dimensionality.\n     */\n    CV_WRAP virtual void getCovs(CV_OUT std::vector<Mat>& covs) const = 0;\n\n    /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component\n    for the given sample.\n\n    @param sample A sample for classification. It should be a one-channel matrix of\n        \f$1 \times dims\f$ or \f$dims \times 1\f$ size.\n    @param probs Optional output matrix that contains posterior probabilities of each component\n        given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type.\n\n    The method returns a two-element double vector. The zeroth element is the likelihood logarithm value\n    for the sample. The first element is the index of the most probable mixture component for the given\n    sample.\n     */\n    CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;\n\n    /** @brief Estimate the Gaussian mixture parameters from a samples set.\n\n    This variation starts with the Expectation step. Initial values of the model parameters will be\n    estimated by the k-means algorithm.\n\n    Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take\n    responses (class labels or function values) as input. Instead, it computes the *Maximum\n    Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the\n    parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means, \f$S_k\f$ in\n    covs[k], \f$\pi_k\f$ in weights, and optionally computes the output \"class label\" for each\n    sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most\n    probable mixture component for each sample).\n\n    The trained model can be used further for prediction, just like any other classifier. The\n    trained model is similar to the NormalBayesClassifier.\n\n    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a\n        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type\n        it will be converted to the inner matrix of such type for further computing.\n    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for\n        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.\n    @param labels The optional output \"class label\" for each sample:\n        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable\n        mixture component for each sample). 
It has \\f$nsamples \\times 1\\f$ size and CV_32SC1 type.\n    @param probs The optional output matrix that contains posterior probabilities of each Gaussian\n        mixture component given the each sample. It has \\f$nsamples \\times nclusters\\f$ size and\n        CV_64FC1 type.\n     */\n    CV_WRAP virtual bool trainEM(InputArray samples,\n                         OutputArray logLikelihoods=noArray(),\n                         OutputArray labels=noArray(),\n                         OutputArray probs=noArray()) = 0;\n\n    /** @brief Estimate the Gaussian mixture parameters from a samples set.\n\n    This variation starts with Expectation step. You need to provide initial means \\f$a_k\\f$ of\n    mixture components. Optionally you can pass initial weights \\f$\\pi_k\\f$ and covariance matrices\n    \\f$S_k\\f$ of mixture components.\n\n    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a\n        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type\n        it will be converted to the inner matrix of such type for the further computing.\n    @param means0 Initial means \\f$a_k\\f$ of mixture components. It is a one-channel matrix of\n        \\f$nclusters \\times dims\\f$ size. If the matrix does not have CV_64F type it will be\n        converted to the inner matrix of such type for the further computing.\n    @param covs0 The vector of initial covariance matrices \\f$S_k\\f$ of mixture components. Each of\n        covariance matrices is a one-channel matrix of \\f$dims \\times dims\\f$ size. If the matrices\n        do not have CV_64F type they will be converted to the inner matrices of such type for the\n        further computing.\n    @param weights0 Initial weights \\f$\\pi_k\\f$ of mixture components. It should be a one-channel\n        floating-point matrix with \\f$1 \\times nclusters\\f$ or \\f$nclusters \\times 1\\f$ size.\n    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for\n        each sample. It has \\f$nsamples \\times 1\\f$ size and CV_64FC1 type.\n    @param labels The optional output \"class label\" for each sample:\n        \\f$\\texttt{labels}_i=\\texttt{arg max}_k(p_{i,k}), i=1..N\\f$ (indices of the most probable\n        mixture component for each sample). It has \\f$nsamples \\times 1\\f$ size and CV_32SC1 type.\n    @param probs The optional output matrix that contains posterior probabilities of each Gaussian\n        mixture component given the each sample. It has \\f$nsamples \\times nclusters\\f$ size and\n        CV_64FC1 type.\n    */\n    CV_WRAP virtual bool trainE(InputArray samples, InputArray means0,\n                        InputArray covs0=noArray(),\n                        InputArray weights0=noArray(),\n                        OutputArray logLikelihoods=noArray(),\n                        OutputArray labels=noArray(),\n                        OutputArray probs=noArray()) = 0;\n\n    /** @brief Estimate the Gaussian mixture parameters from a samples set.\n\n    This variation starts with Maximization step. You need to provide initial probabilities\n    \\f$p_{i,k}\\f$ to use this option.\n\n    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a\n        one-channel matrix, each row of which is a sample. 
If the matrix does not have CV_64F type\n        it will be converted to the inner matrix of such type for further computing.\n    @param probs0 Initial probabilities \f$p_{i,k}\f$ of sample \f$i\f$ belonging to mixture component\n        \f$k\f$. It is a one-channel floating-point matrix of \f$nsamples \times nclusters\f$ size.\n    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for\n        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.\n    @param labels The optional output \"class label\" for each sample:\n        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable\n        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.\n    @param probs The optional output matrix that contains posterior probabilities of each Gaussian\n        mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and\n        CV_64FC1 type.\n    */\n    CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0,\n                        OutputArray logLikelihoods=noArray(),\n                        OutputArray labels=noArray(),\n                        OutputArray probs=noArray()) = 0;\n\n    /** Creates empty %EM model.\n    The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you\n    can use one of the EM::train\* methods or load it from file using Algorithm::load\<EM\>(filename).\n     */\n    CV_WRAP static Ptr<EM> create();\n};\n\n/****************************************************************************************\\\n*                                      Decision Tree                                     *\n\\****************************************************************************************/\n\n/** @brief The class represents a single decision tree or a collection of decision trees.\n\nThe current public interface of the class allows the user to train only a single decision tree; however,\nthe class is capable of storing multiple decision trees and using them for prediction (by summing\nresponses or using voting schemes), and the classes derived from DTrees (such as RTrees and Boost)\nuse this capability to implement decision tree ensembles.\n\n@sa @ref ml_intro_trees\n*/\nclass CV_EXPORTS_W DTrees : public StatModel\n{\npublic:\n    /** Predict options */\n    enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) };\n\n    /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to\n    find a suboptimal split.\n    If a discrete variable, on which the training procedure tries to make a split, takes more than\n    maxCategories values, the precise best subset estimation may take a very long time because the\n    algorithm is exponential. Instead, many decision tree engines (including our implementation)\n    try to find a sub-optimal split in this case by clustering all the samples into maxCategories\n    clusters, that is, some categories are merged together. The clustering is applied only in n \>\n    2-class classification problems for categorical variables with N \> maxCategories possible\n    values. 
In case of regression and 2-class classification the optimal split can be found\n    efficiently without employing clustering, thus the parameter is not used in these cases.\n    Default value is 10.*/\n    /** @see setMaxCategories */\n    CV_WRAP virtual int getMaxCategories() const = 0;\n    /** @copybrief getMaxCategories @see getMaxCategories */\n    CV_WRAP virtual void setMaxCategories(int val) = 0;\n\n    /** The maximum possible depth of the tree.\n    That is, the training algorithm attempts to split a node while its depth is less than maxDepth.\n    The root node has zero depth. The actual depth may be smaller if the other termination criteria\n    are met (see the outline of the training procedure @ref ml_intro_trees \"here\"), and/or if the\n    tree is pruned. Default value is INT_MAX.*/\n    /** @see setMaxDepth */\n    CV_WRAP virtual int getMaxDepth() const = 0;\n    /** @copybrief getMaxDepth @see getMaxDepth */\n    CV_WRAP virtual void setMaxDepth(int val) = 0;\n\n    /** If the number of samples in a node is less than this parameter then the node will not be split.\n\n    Default value is 10.*/\n    /** @see setMinSampleCount */\n    CV_WRAP virtual int getMinSampleCount() const = 0;\n    /** @copybrief getMinSampleCount @see getMinSampleCount */\n    CV_WRAP virtual void setMinSampleCount(int val) = 0;\n\n    /** If CVFolds \> 1 then the algorithm prunes the built decision tree using the K-fold\n    cross-validation procedure, where K is equal to CVFolds.\n    Default value is 10.*/\n    /** @see setCVFolds */\n    CV_WRAP virtual int getCVFolds() const = 0;\n    /** @copybrief getCVFolds @see getCVFolds */\n    CV_WRAP virtual void setCVFolds(int val) = 0;\n\n    /** If true then surrogate splits will be built.\n    These splits allow working with missing data and computing variable importance correctly.\n    Default value is false.\n    @note currently it's not implemented.*/\n    /** @see setUseSurrogates */\n    CV_WRAP virtual bool getUseSurrogates() const = 0;\n    /** @copybrief getUseSurrogates @see getUseSurrogates */\n    CV_WRAP virtual void setUseSurrogates(bool val) = 0;\n\n    /** If true then pruning will be harsher.\n    This will make a tree more compact and more resistant to the training data noise but a bit less\n    accurate. Default value is true.*/\n    /** @see setUse1SERule */\n    CV_WRAP virtual bool getUse1SERule() const = 0;\n    /** @copybrief getUse1SERule @see getUse1SERule */\n    CV_WRAP virtual void setUse1SERule(bool val) = 0;\n\n    /** If true then pruned branches are physically removed from the tree.\n    Otherwise they are retained and it is possible to get results from the original unpruned (or\n    pruned less aggressively) tree. Default value is true.*/\n    /** @see setTruncatePrunedTree */\n    CV_WRAP virtual bool getTruncatePrunedTree() const = 0;\n    /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */\n    CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0;\n\n    /** Termination criteria for regression trees.\n    If all absolute differences between an estimated value in a node and the values of the train samples\n    in this node are less than this parameter, then the node will not be split further. 
Default\n    value is 0.01f.*/\n    /** @see setRegressionAccuracy */\n    CV_WRAP virtual float getRegressionAccuracy() const = 0;\n    /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */\n    CV_WRAP virtual void setRegressionAccuracy(float val) = 0;\n\n    /** @brief The array of a priori class probabilities, sorted by the class label value.\n\n    The parameter can be used to tune the decision tree preferences toward a certain class. For\n    example, if you want to detect some rare anomaly occurrence, the training base will likely\n    contain many more normal cases than anomalies, so a very good classification performance\n    will be achieved just by considering every case as normal. To avoid this, the priors can be\n    specified, where the anomaly probability is artificially increased (up to 0.5 or even\n    greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is\n    adjusted properly.\n\n    You can also think about this parameter as weights of prediction categories which determine\n    relative weights that you give to misclassification. That is, if the weight of the first\n    category is 1 and the weight of the second category is 10, then each mistake in predicting\n    the second category is equivalent to making 10 mistakes in predicting the first category.\n    Default value is empty Mat.*/\n    /** @see setPriors */\n    CV_WRAP virtual cv::Mat getPriors() const = 0;\n    /** @copybrief getPriors @see getPriors */\n    CV_WRAP virtual void setPriors(const cv::Mat &val) = 0;\n\n    /** @brief The class represents a decision tree node.\n     */\n    class CV_EXPORTS Node\n    {\n    public:\n        Node();\n        double value; //!< Value at the node: a class label in case of classification or estimated\n                      //!< function value in case of regression.\n        int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the\n                      //!< node. It is used internally in classification trees and tree ensembles.\n        int parent; //!< Index of the parent node\n        int left; //!< Index of the left child node\n        int right; //!< Index of the right child node\n        int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the\n                        //!< case of missing values.\n        int split; //!< Index of the first split\n    };\n\n    /** @brief The class represents a split in a decision tree.\n     */\n    class CV_EXPORTS Split\n    {\n    public:\n        Split();\n        int varIdx; //!< Index of variable on which the split is created.\n        bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right\n                       //!< branches are exchanged in the rule expressions below).\n        float quality; //!< The split quality, a positive number. 
It is used to choose the best split.\n        int next; //!< Index of the next split in the list of splits for the node\n        float c; /**< The threshold value in case of split on an ordered variable.\n                      The rule is:\n                      @code{.none}\n                      if var_value < c\n                        then next_node <- left\n                        else next_node <- right\n                      @endcode */\n        int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable.\n                            The rule is:\n                            @code{.none}\n                            if bitset[var_value] == 1\n                                then next_node <- left\n                                else next_node <- right\n                            @endcode */\n    };\n\n    /** @brief Returns indices of root nodes\n    */\n    virtual const std::vector<int>& getRoots() const = 0;\n    /** @brief Returns all the nodes\n\n    All the node indices are indices into the returned vector.\n     */\n    virtual const std::vector<Node>& getNodes() const = 0;\n    /** @brief Returns all the splits\n\n    All the split indices are indices into the returned vector.\n     */\n    virtual const std::vector<Split>& getSplits() const = 0;\n    /** @brief Returns all the bitsets for categorical splits\n\n    Split::subsetOfs is an offset in the returned vector\n     */\n    virtual const std::vector<int>& getSubsets() const = 0;\n\n    /** @brief Creates the empty model\n\n    The static method creates an empty decision tree. It should then be\n    trained using the train method (see StatModel::train). Alternatively, you can load the model from\n    file using Algorithm::load\<DTrees\>(filename).\n     */\n    CV_WRAP static Ptr<DTrees> create();\n};\n\n/****************************************************************************************\\\n*                                   Random Trees Classifier                              *\n\\****************************************************************************************/\n\n/** @brief The class implements the random forest predictor.\n\n@sa @ref ml_intro_rtrees\n */\nclass CV_EXPORTS_W RTrees : public DTrees\n{\npublic:\n\n    /** If true then variable importance will be calculated and can then be retrieved by RTrees::getVarImportance.\n    Default value is false.*/\n    /** @see setCalculateVarImportance */\n    CV_WRAP virtual bool getCalculateVarImportance() const = 0;\n    /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */\n    CV_WRAP virtual void setCalculateVarImportance(bool val) = 0;\n\n    /** The size of the randomly selected subset of features at each tree node, used\n    to find the best split(s).\n    If you set it to 0 then the size will be set to the square root of the total number of\n    features. Default value is 0.*/\n    /** @see setActiveVarCount */\n    CV_WRAP virtual int getActiveVarCount() const = 0;\n    /** @copybrief getActiveVarCount @see getActiveVarCount */\n    CV_WRAP virtual void setActiveVarCount(int val) = 0;\n\n    /** The termination criteria that specify when the training algorithm stops:\n    either when the specified number of trees is trained and added to the ensemble or when\n    sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have the\n    better the accuracy. 
However, the improvement in accuracy generally diminishes and plateaus\n    past a certain number of trees. Also keep in mind that the number of trees increases the\n    prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITER +\n    TermCriteria::EPS, 50, 0.1)*/\n    /** @see setTermCriteria */\n    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;\n    /** @copybrief getTermCriteria @see getTermCriteria */\n    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;\n\n    /** Returns the variable importance array.\n    The method returns the variable importance vector, computed at the training stage when\n    CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is\n    returned.\n     */\n    CV_WRAP virtual Mat getVarImportance() const = 0;\n\n    /** Creates the empty model.\n    Use StatModel::train to create and train the model, or Algorithm::load to load a pre-trained model.\n     */\n    CV_WRAP static Ptr<RTrees> create();\n};\n\n/****************************************************************************************\\\n*                                   Boosted tree classifier                              *\n\\****************************************************************************************/\n\n/** @brief Boosted tree classifier derived from DTrees\n\n@sa @ref ml_intro_boost\n */\nclass CV_EXPORTS_W Boost : public DTrees\n{\npublic:\n    /** Type of the boosting algorithm.\n    See Boost::Types. Default value is Boost::REAL. */\n    /** @see setBoostType */\n    CV_WRAP virtual int getBoostType() const = 0;\n    /** @copybrief getBoostType @see getBoostType */\n    CV_WRAP virtual void setBoostType(int val) = 0;\n\n    /** The number of weak classifiers.\n    Default value is 100. */\n    /** @see setWeakCount */\n    CV_WRAP virtual int getWeakCount() const = 0;\n    /** @copybrief getWeakCount @see getWeakCount */\n    CV_WRAP virtual void setWeakCount(int val) = 0;\n\n    /** A threshold between 0 and 1 used to save computational time.\n    Samples with summary weight \f$\leq 1 - weight\_trim\_rate\f$ do not participate in the *next*\n    iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/\n    /** @see setWeightTrimRate */\n    CV_WRAP virtual double getWeightTrimRate() const = 0;\n    /** @copybrief getWeightTrimRate @see getWeightTrimRate */\n    CV_WRAP virtual void setWeightTrimRate(double val) = 0;\n\n    /** Boosting type.\n    Gentle AdaBoost and Real AdaBoost are often the preferable choices. */\n    enum Types {\n        DISCRETE=0, //!< Discrete AdaBoost.\n        REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions\n                //!< and works well with categorical data.\n        LOGIT=2, //!< LogitBoost. It can produce good regression fits.\n        GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that\n                 //!< reason is often good with regression data.\n    };\n\n    /** Creates the empty model.\n    Use StatModel::train to train the model, Algorithm::load\<Boost\>(filename) to load the pre-trained model. 
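\n\n    A minimal configuration sketch (the boosting type and weak count below are illustrative\n    choices, not the defaults):\n    @code{.cpp}\n    Ptr<Boost> boost = Boost::create();\n    boost->setBoostType(Boost::GENTLE); // default is Boost::REAL\n    boost->setWeakCount(200);           // default is 100\n    @endcode\n    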
*/\n    CV_WRAP static Ptr<Boost> create();\n};\n\n/****************************************************************************************\\\n*                                   Gradient Boosted Trees                               *\n\\****************************************************************************************/\n\n/*class CV_EXPORTS_W GBTrees : public DTrees\n{\npublic:\n    struct CV_EXPORTS_W_MAP Params : public DTrees::Params\n    {\n        CV_PROP_RW int weakCount;\n        CV_PROP_RW int lossFunctionType;\n        CV_PROP_RW float subsamplePortion;\n        CV_PROP_RW float shrinkage;\n\n        Params();\n        Params( int lossFunctionType, int weakCount, float shrinkage,\n                float subsamplePortion, int maxDepth, bool useSurrogates );\n    };\n\n    enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS};\n\n    virtual void setK(int k) = 0;\n\n    virtual float predictSerial( InputArray samples,\n                                 OutputArray weakResponses, int flags) const = 0;\n\n    static Ptr<GBTrees> create(const Params& p);\n};*/\n\n/****************************************************************************************\\\n*                              Artificial Neural Networks (ANN)                          *\n\\****************************************************************************************/\n\n/////////////////////////////////// Multi-Layer Perceptrons //////////////////////////////\n\n/** @brief Artificial Neural Networks - Multi-Layer Perceptrons.\n\nUnlike many other models in ML that are constructed and trained at once, in the MLP model these\nsteps are separated. First, a network with the specified topology is created using the non-default\nconstructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is\ntrained using a set of input and output vectors. The training procedure can be repeated more than\nonce, that is, the weights can be adjusted based on the new training data.\n\nAdditional flags for StatModel::train are available: ANN_MLP::TrainFlags.\n\n@sa @ref ml_intro_ann\n */\nclass CV_EXPORTS_W ANN_MLP : public StatModel\n{\npublic:\n    /** Available training methods */\n    enum TrainingMethods {\n        BACKPROP=0, //!< The back-propagation algorithm.\n        RPROP=1 //!< The RPROP algorithm. See @cite RPROP93 for details.\n    };\n\n    /** Sets training method and common parameters.\n    @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.\n    @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP\n    @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP.\n    */\n    CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;\n\n    /** Returns current training method */\n    CV_WRAP virtual int getTrainMethod() const = 0;\n\n    /** Initialize the activation function for each neuron.\n    Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.\n    @param type The type of activation function. See ANN_MLP::ActivationFunctions.\n    @param param1 The first parameter of the activation function, \\f$\\alpha\\f$. Default value is 0.\n    @param param2 The second parameter of the activation function, \\f$\\beta\\f$. 
Default value is 0.\n    */\n    CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;\n\n    /**  Integer vector specifying the number of neurons in each layer including the input and output layers.\n    The very first element specifies the number of elements in the input layer.\n    The last element is the number of elements in the output layer. Default value is empty Mat.\n    @sa getLayerSizes */\n    CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0;\n\n    /**  Integer vector specifying the number of neurons in each layer including the input and output layers.\n    The very first element specifies the number of elements in the input layer.\n    The last element is the number of elements in the output layer.\n    @sa setLayerSizes */\n    CV_WRAP virtual cv::Mat getLayerSizes() const = 0;\n\n    /** Termination criteria of the training algorithm.\n    You can specify the maximum number of iterations (maxCount) and/or how much the error could\n    change between the iterations to make the algorithm continue (epsilon). Default value is\n    TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/\n    /** @see setTermCriteria */\n    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;\n    /** @copybrief getTermCriteria @see getTermCriteria */\n    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;\n\n    /** BPROP: Strength of the weight gradient term.\n    The recommended value is about 0.1. Default value is 0.1.*/\n    /** @see setBackpropWeightScale */\n    CV_WRAP virtual double getBackpropWeightScale() const = 0;\n    /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */\n    CV_WRAP virtual void setBackpropWeightScale(double val) = 0;\n\n    /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).\n    This parameter provides some inertia to smooth the random fluctuations of the weights. It can\n    vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.\n    Default value is 0.1.*/\n    /** @see setBackpropMomentumScale */\n    CV_WRAP virtual double getBackpropMomentumScale() const = 0;\n    /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */\n    CV_WRAP virtual void setBackpropMomentumScale(double val) = 0;\n\n    /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.\n    Default value is 0.1.*/\n    /** @see setRpropDW0 */\n    CV_WRAP virtual double getRpropDW0() const = 0;\n    /** @copybrief getRpropDW0 @see getRpropDW0 */\n    CV_WRAP virtual void setRpropDW0(double val) = 0;\n\n    /** RPROP: Increase factor \f$\eta^+\f$.\n    It must be \>1. Default value is 1.2.*/\n    /** @see setRpropDWPlus */\n    CV_WRAP virtual double getRpropDWPlus() const = 0;\n    /** @copybrief getRpropDWPlus @see getRpropDWPlus */\n    CV_WRAP virtual void setRpropDWPlus(double val) = 0;\n\n    /** RPROP: Decrease factor \f$\eta^-\f$.\n    It must be \<1. Default value is 0.5.*/\n    /** @see setRpropDWMinus */\n    CV_WRAP virtual double getRpropDWMinus() const = 0;\n    /** @copybrief getRpropDWMinus @see getRpropDWMinus */\n    CV_WRAP virtual void setRpropDWMinus(double val) = 0;\n\n    /** RPROP: Update-values lower limit \f$\Delta_{min}\f$.\n    It must be positive. 
Default value is FLT_EPSILON.*/\n    /** @see setRpropDWMin */\n    CV_WRAP virtual double getRpropDWMin() const = 0;\n    /** @copybrief getRpropDWMin @see getRpropDWMin */\n    CV_WRAP virtual void setRpropDWMin(double val) = 0;\n\n    /** RPROP: Update-values upper limit \f$\Delta_{max}\f$.\n    It must be \>1. Default value is 50.*/\n    /** @see setRpropDWMax */\n    CV_WRAP virtual double getRpropDWMax() const = 0;\n    /** @copybrief getRpropDWMax @see getRpropDWMax */\n    CV_WRAP virtual void setRpropDWMax(double val) = 0;\n\n    /** Possible activation functions */\n    enum ActivationFunctions {\n        /** Identity function: \f$f(x)=x\f$ */\n        IDENTITY = 0,\n        /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$\n        @note\n        If you are using the default sigmoid activation function with the default parameter values\n        param1=0 and param2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output\n        will range over [-1.7159, 1.7159] instead of [0,1].*/\n        SIGMOID_SYM = 1,\n        /** Gaussian function: \f$f(x)=\beta e^{-\alpha x*x}\f$ */\n        GAUSSIAN = 2\n    };\n\n    /** Train options */\n    enum TrainFlags {\n        /** Update the network weights, rather than compute them from scratch. In the latter case\n        the weights are initialized using the Nguyen-Widrow algorithm. */\n        UPDATE_WEIGHTS = 1,\n        /** Do not normalize the input vectors. If this flag is not set, the training algorithm\n        normalizes each input feature independently, shifting its mean value to 0 and making the\n        standard deviation equal to 1. If the network is assumed to be updated frequently, the new\n        training data could be much different from the original one. In this case, you should take care\n        of proper normalization. */\n        NO_INPUT_SCALE = 2,\n        /** Do not normalize the output vectors. If the flag is not set, the training algorithm\n        normalizes each output feature independently, by transforming it to a certain range\n        depending on the activation function used. */\n        NO_OUTPUT_SCALE = 4\n    };\n\n    CV_WRAP virtual Mat getWeights(int layerIdx) const = 0;\n\n    /** @brief Creates empty model\n\n    Use StatModel::train to train the model, Algorithm::load\<ANN_MLP\>(filename) to load the pre-trained model.\n    Note that the train method has optional flags: ANN_MLP::TrainFlags.\n     */\n    CV_WRAP static Ptr<ANN_MLP> create();\n};\n\n/****************************************************************************************\\\n*                           Logistic Regression                                          *\n\\****************************************************************************************/\n\n/** @brief Implements the Logistic Regression classifier.\n\n@sa @ref ml_intro_lr\n */\nclass CV_EXPORTS_W LogisticRegression : public StatModel\n{\npublic:\n\n    /** Learning rate. */\n    /** @see setLearningRate */\n    CV_WRAP virtual double getLearningRate() const = 0;\n    /** @copybrief getLearningRate @see getLearningRate */\n    CV_WRAP virtual void setLearningRate(double val) = 0;\n\n    /** Number of iterations. */\n    /** @see setIterations */\n    CV_WRAP virtual int getIterations() const = 0;\n    /** @copybrief getIterations @see getIterations */\n    CV_WRAP virtual void setIterations(int val) = 0;\n\n    /** Kind of regularization to be applied. See LogisticRegression::RegKinds. 
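\n\n    A minimal sketch (choosing %L2 here is purely illustrative):\n    @code{.cpp}\n    Ptr<LogisticRegression> lr = LogisticRegression::create();\n    lr->setRegularization(LogisticRegression::REG_L2);\n    @endcode\n    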
*/\n    /** @see setRegularization */\n    CV_WRAP virtual int getRegularization() const = 0;\n    /** @copybrief getRegularization @see getRegularization */\n    CV_WRAP virtual void setRegularization(int val) = 0;\n\n    /** Kind of training method used. See LogisticRegression::Methods. */\n    /** @see setTrainMethod */\n    CV_WRAP virtual int getTrainMethod() const = 0;\n    /** @copybrief getTrainMethod @see getTrainMethod */\n    CV_WRAP virtual void setTrainMethod(int val) = 0;\n\n    /** Specifies the number of training samples taken in each step of Mini-Batch Gradient\n    Descent. It will only be used if the LogisticRegression::MINI_BATCH training algorithm is used. It\n    has to take values less than the total number of training samples. */\n    /** @see setMiniBatchSize */\n    CV_WRAP virtual int getMiniBatchSize() const = 0;\n    /** @copybrief getMiniBatchSize @see getMiniBatchSize */\n    CV_WRAP virtual void setMiniBatchSize(int val) = 0;\n\n    /** Termination criteria of the algorithm. */\n    /** @see setTermCriteria */\n    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;\n    /** @copybrief getTermCriteria @see getTermCriteria */\n    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;\n\n    //! Regularization kinds\n    enum RegKinds {\n        REG_DISABLE = -1, //!< Regularization disabled\n        REG_L1 = 0, //!< %L1 norm\n        REG_L2 = 1 //!< %L2 norm\n    };\n\n    //! Training methods\n    enum Methods {\n        BATCH = 0,\n        MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method.\n    };\n\n    /** @brief Predicts responses for input samples and returns a float type.\n\n    @param samples The input data for the prediction algorithm. Matrix [m x n], where each row\n        contains variables (features) of one object being classified. Should have data type CV_32F.\n    @param results Predicted labels as a column matrix of type CV_32S.\n    @param flags Not used.\n     */\n    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;\n\n    /** @brief This function returns the trained parameters arranged across rows.\n\n    For a two-class classification problem, it returns a row matrix. It returns the learnt parameters of\n    the Logistic Regression as a matrix of type CV_32F.\n     */\n    CV_WRAP virtual Mat get_learnt_thetas() const = 0;\n\n    /** @brief Creates empty model.\n\n    Creates a Logistic Regression model with the given parameters.\n     */\n    CV_WRAP static Ptr<LogisticRegression> create();\n};\n\n/****************************************************************************************\\\n*                           Auxiliary function declarations                              *\n\\****************************************************************************************/\n\n/** @brief Generates _samples_ from a multivariate normal distribution\n\n@param mean an average row vector\n@param cov symmetric covariance matrix\n@param nsamples returned samples count\n@param samples returned samples array\n*/\nCV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples);\n\n/** @brief Creates test set */\nCV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses,\n                                                OutputArray samples, OutputArray responses);\n\n//! @} ml\n\n}\n}\n\n#endif // __cplusplus\n#endif // __OPENCV_ML_HPP__\n\n/* End of file. */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/objdetect/detection_based_tracker.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OBJDETECT_DBT_HPP__\n#define __OPENCV_OBJDETECT_DBT_HPP__\n\n// After this condition removal update blacklist for bindings: modules/python/common.cmake\n#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(__ANDROID__) || \\\n  (defined(__cplusplus) &&  __cplusplus > 201103L) || (defined(_MSC_VER) && _MSC_VER >= 1700)\n\n#include <vector>\n\nnamespace cv\n{\n\n//! @addtogroup objdetect\n//! 
@{\n\nclass CV_EXPORTS DetectionBasedTracker\n{\n    public:\n        struct Parameters\n        {\n            int maxTrackLifetime;\n            int minDetectionPeriod; // the minimal time between runs of the big object detector (on the whole frame), in ms (1000 means 1 sec); default=0\n\n            Parameters();\n        };\n\n        class IDetector\n        {\n            public:\n                IDetector():\n                    minObjSize(96, 96),\n                    maxObjSize(INT_MAX, INT_MAX),\n                    minNeighbours(2),\n                    scaleFactor(1.1f)\n                {}\n\n                virtual void detect(const cv::Mat& image, std::vector<cv::Rect>& objects) = 0;\n\n                void setMinObjectSize(const cv::Size& min)\n                {\n                    minObjSize = min;\n                }\n                void setMaxObjectSize(const cv::Size& max)\n                {\n                    maxObjSize = max;\n                }\n                cv::Size getMinObjectSize() const\n                {\n                    return minObjSize;\n                }\n                cv::Size getMaxObjectSize() const\n                {\n                    return maxObjSize;\n                }\n                float getScaleFactor()\n                {\n                    return scaleFactor;\n                }\n                void setScaleFactor(float value)\n                {\n                    scaleFactor = value;\n                }\n                int getMinNeighbours()\n                {\n                    return minNeighbours;\n                }\n                void setMinNeighbours(int value)\n                {\n                    minNeighbours = value;\n                }\n                virtual ~IDetector() {}\n\n            protected:\n                cv::Size minObjSize;\n                cv::Size maxObjSize;\n                int minNeighbours;\n                float scaleFactor;\n        };\n\n        DetectionBasedTracker(cv::Ptr<IDetector> mainDetector, cv::Ptr<IDetector> trackingDetector, const Parameters& params);\n        virtual ~DetectionBasedTracker();\n\n        virtual bool run();\n        virtual void stop();\n        virtual void resetTracking();\n\n        virtual void process(const cv::Mat& imageGray);\n\n        bool setParameters(const Parameters& params);\n        const Parameters& getParameters() const;\n\n\n        typedef std::pair<cv::Rect, int> Object;\n        virtual void getObjects(std::vector<cv::Rect>& result) const;\n        virtual void getObjects(std::vector<Object>& result) const;\n\n        enum ObjectStatus\n        {\n            DETECTED_NOT_SHOWN_YET,\n            DETECTED,\n            DETECTED_TEMPORARY_LOST,\n            WRONG_OBJECT\n        };\n        struct ExtObject\n        {\n            int id;\n            cv::Rect location;\n            ObjectStatus status;\n            ExtObject(int _id, cv::Rect _location, ObjectStatus _status)\n                :id(_id), location(_location), status(_status)\n            {\n            }\n        };\n        virtual void getObjects(std::vector<ExtObject>& result) const;\n\n\n        virtual int addObject(const cv::Rect& location); // returns the id of the new object\n\n    protected:\n        class SeparateDetectionWork;\n        cv::Ptr<SeparateDetectionWork> separateDetectionWork;\n        friend void* workcycleObjectDetectorFunction(void* p);\n\n        struct InnerParameters\n        {\n            int numLastPositionsToTrack;\n            int 
numStepsToWaitBeforeFirstShow;\n            int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown;\n            int numStepsToShowWithoutDetecting;\n\n            float coeffTrackingWindowSize;\n            float coeffObjectSizeToTrack;\n            float coeffObjectSpeedUsingInPrediction;\n\n            InnerParameters();\n        };\n        Parameters parameters;\n        InnerParameters innerParameters;\n\n        struct TrackedObject\n        {\n            typedef std::vector<cv::Rect> PositionsVector;\n\n            PositionsVector lastPositions;\n\n            int numDetectedFrames;\n            int numFramesNotDetected;\n            int id;\n\n            TrackedObject(const cv::Rect& rect):numDetectedFrames(1), numFramesNotDetected(0)\n            {\n                lastPositions.push_back(rect);\n                id=getNextId();\n            };\n\n            static int getNextId()\n            {\n                static int _id=0;\n                return _id++;\n            }\n        };\n\n        int numTrackedSteps;\n        std::vector<TrackedObject> trackedObjects;\n\n        std::vector<float> weightsPositionsSmoothing;\n        std::vector<float> weightsSizesSmoothing;\n\n        cv::Ptr<IDetector> cascadeForTracking;\n\n        void updateTrackedObjects(const std::vector<cv::Rect>& detectedObjects);\n        cv::Rect calcTrackedObjectPositionToShow(int i) const;\n        cv::Rect calcTrackedObjectPositionToShow(int i, ObjectStatus& status) const;\n        void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector<cv::Rect>& detectedObjectsInRegions);\n};\n\n//! @} objdetect\n\n} //end of cv namespace\n#endif\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/objdetect/objdetect.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/objdetect.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/objdetect/objdetect_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OBJDETECT_C_H__\n#define __OPENCV_OBJDETECT_C_H__\n\n#include \"opencv2/core/core_c.h\"\n\n#ifdef __cplusplus\n#include <deque>\n#include <vector>\n\nextern \"C\" {\n#endif\n\n/** @addtogroup objdetect_c\n  @{\n  */\n\n/****************************************************************************************\\\n*                         Haar-like Object Detection functions                           *\n\\****************************************************************************************/\n\n#define CV_HAAR_MAGIC_VAL    0x42500000\n#define CV_TYPE_NAME_HAAR    \"opencv-haar-classifier\"\n\n#define CV_IS_HAAR_CLASSIFIER( haar )                                                    \\\n    ((haar) != NULL &&                                                                   \\\n    (((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL)\n\n#define CV_HAAR_FEATURE_MAX  3\n\ntypedef struct CvHaarFeature\n{\n    int tilted;\n    struct\n    {\n        CvRect r;\n        float weight;\n    } rect[CV_HAAR_FEATURE_MAX];\n} CvHaarFeature;\n\ntypedef struct CvHaarClassifier\n{\n    int count;\n    CvHaarFeature* haar_feature;\n    float* threshold;\n    int* left;\n    int* right;\n    float* alpha;\n} 
CvHaarClassifier;\n\ntypedef struct CvHaarStageClassifier\n{\n    int  count;\n    float threshold;\n    CvHaarClassifier* classifier;\n\n    int next;\n    int child;\n    int parent;\n} CvHaarStageClassifier;\n\ntypedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade;\n\ntypedef struct CvHaarClassifierCascade\n{\n    int  flags;\n    int  count;\n    CvSize orig_window_size;\n    CvSize real_window_size;\n    double scale;\n    CvHaarStageClassifier* stage_classifier;\n    CvHidHaarClassifierCascade* hid_cascade;\n} CvHaarClassifierCascade;\n\ntypedef struct CvAvgComp\n{\n    CvRect rect;\n    int neighbors;\n} CvAvgComp;\n\n/* Loads haar classifier cascade from a directory.\n   It is obsolete: convert your cascade to xml and use cvLoad instead */\nCVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade(\n                    const char* directory, CvSize orig_window_size);\n\nCVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade );\n\n#define CV_HAAR_DO_CANNY_PRUNING    1\n#define CV_HAAR_SCALE_IMAGE         2\n#define CV_HAAR_FIND_BIGGEST_OBJECT 4\n#define CV_HAAR_DO_ROUGH_SEARCH     8\n\nCVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image,\n                     CvHaarClassifierCascade* cascade, CvMemStorage* storage,\n                     double scale_factor CV_DEFAULT(1.1),\n                     int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),\n                     CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)));\n\n/* sets images for haar classifier cascade */\nCVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade,\n                                                const CvArr* sum, const CvArr* sqsum,\n                                                const CvArr* tilted_sum, double scale );\n\n/* runs the cascade on the specified window */\nCVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade,\n                                       CvPoint pt, int start_stage CV_DEFAULT(0));\n\n/** @} objdetect_c */\n\n#ifdef __cplusplus\n}\n\nCV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image,\n                     CvHaarClassifierCascade* cascade, CvMemStorage* storage,\n                     std::vector<int>& rejectLevels, std::vector<double>& levelWeights,\n                     double scale_factor = 1.1,\n                     int min_neighbors = 3, int flags = 0,\n                     CvSize min_size = cvSize(0, 0), CvSize max_size = cvSize(0, 0),\n                     bool outputRejectLevels = false );\n\n#endif\n\n#endif /* __OPENCV_OBJDETECT_C_H__ */\n"
  },
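  {
    "path": "docs/examples/haar_detect_c_sketch.cpp",
    "content": "/* Editor's note: hypothetical usage sketch, not part of upstream OpenCV or of the\n   original sources in this repository. It shows the legacy C-style Haar API declared\n   in objdetect_c.h (cvLoad, cvHaarDetectObjects, cvReleaseHaarClassifierCascade).\n   The cascade file name and the image path are assumptions. */\n#include <stdio.h>\n#include \"opencv2/objdetect/objdetect_c.h\"\n#include \"opencv2/imgcodecs/imgcodecs_c.h\"\n\nint main(void)\n{\n    /* The XML cascade ships with OpenCV; this path is an assumption. */\n    CvHaarClassifierCascade* cascade =\n        (CvHaarClassifierCascade*)cvLoad(\"haarcascade_frontalface_alt.xml\", 0, 0, 0);\n    IplImage* img = cvLoadImage(\"faces.jpg\", CV_LOAD_IMAGE_GRAYSCALE);\n    if (!cascade || !img) return 1;\n\n    CvMemStorage* storage = cvCreateMemStorage(0);\n    /* scale_factor = 1.1, min_neighbors = 3, flags as defined in the header above */\n    CvSeq* faces = cvHaarDetectObjects(img, cascade, storage, 1.1, 3,\n                                       CV_HAAR_SCALE_IMAGE, cvSize(30, 30), cvSize(0, 0));\n    for (int i = 0; i < (faces ? faces->total : 0); i++)\n    {\n        /* the returned sequence holds CvAvgComp entries, as declared above */\n        CvAvgComp comp = *(CvAvgComp*)cvGetSeqElem(faces, i);\n        printf(\"face %d: %dx%d at (%d, %d), %d neighbors\\n\", i,\n               comp.rect.width, comp.rect.height, comp.rect.x, comp.rect.y, comp.neighbors);\n    }\n    cvReleaseMemStorage(&storage);\n    cvReleaseImage(&img);\n    cvReleaseHaarClassifierCascade(&cascade);\n    return 0;\n}\n"
  },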
  {
    "path": "src/3rdparty/opencv/include/opencv2/objdetect.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OBJDETECT_HPP__\n#define __OPENCV_OBJDETECT_HPP__\n\n#include \"opencv2/core.hpp\"\n\n/**\n@defgroup objdetect Object Detection\n\nHaar Feature-based Cascade Classifier for Object Detection\n----------------------------------------------------------\n\nThe object detector described below has been initially proposed by Paul Viola @cite Viola01 and\nimproved by Rainer Lienhart @cite Lienhart02 .\n\nFirst, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is\ntrained with a few hundred sample views of a particular object (i.e., a face or a car), called\npositive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary\nimages of the same size.\n\nAfter a classifier is trained, it can be applied to a region of interest (of the same size as used\nduring the training) in an input image. The classifier outputs a \"1\" if the region is likely to show\nthe object (i.e., face/car), and \"0\" otherwise. To search for the object in the whole image one can\nmove the search window across the image and check every location using the classifier. 
The\nclassifier is designed so that it can be easily \"resized\" in order to be able to find the objects of\ninterest at different sizes, which is more efficient than resizing the image itself. So, to find an\nobject of an unknown size in the image the scan procedure should be done several times at different\nscales.\n\nThe word \"cascade\" in the classifier name means that the resultant classifier consists of several\nsimpler classifiers (*stages*) that are applied subsequently to a region of interest until at some\nstage the candidate is rejected or all the stages are passed. The word \"boosted\" means that the\nclassifiers at every stage of the cascade are complex themselves and they are built out of basic\nclassifiers using one of four different boosting techniques (weighted voting). Currently Discrete\nAdaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are\ndecision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic\nclassifiers, and are calculated as described below. The current algorithm uses the following\nHaar-like features:\n\n![image](pics/haarfeatures.png)\n\nThe feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within\nthe region of interest and the scale (this scale is not the same as the scale used at the detection\nstage, though these two scales are multiplied). For example, in the case of the third line feature\n(2c) the response is calculated as the difference between the sum of image pixels under the\nrectangle covering the whole feature (including the two white stripes and the black stripe in the\nmiddle) and the sum of the image pixels under the black stripe multiplied by 3 in order to\ncompensate for the differences in the size of areas. The sums of pixel values over rectangular\nregions are calculated rapidly using integral images (see below and the integral description).\n\nTo see the object detector at work, have a look at the facedetect demo:\n<https://github.com/Itseez/opencv/tree/master/samples/cpp/dbt_face_detection.cpp>\n\nThe following reference is for the detection part only. There is a separate application called\nopencv_traincascade that can train a cascade of boosted classifiers from a set of samples.\n\n@note In the new C++ interface it is also possible to use LBP (local binary pattern) features in\naddition to Haar-like features.\n\n[Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection\nusing a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at\n<http://research.microsoft.com/en-us/um/people/viola/Pubs/Detect/violaJones_CVPR2001.pdf>\n\n@{\n    @defgroup objdetect_c C API\n@}\n */\n\ntypedef struct CvHaarClassifierCascade CvHaarClassifierCascade;\n\nnamespace cv\n{\n\n//! @addtogroup objdetect\n//! @{\n\n///////////////////////////// Object Detection ////////////////////////////\n\n//! class for grouping object candidates, detected by Cascade Classifier, HOG etc.\n//! 
instance of the class is to be passed to cv::partition (see cxoperations.hpp)\nclass CV_EXPORTS SimilarRects\n{\npublic:\n    SimilarRects(double _eps) : eps(_eps) {}\n    inline bool operator()(const Rect& r1, const Rect& r2) const\n    {\n        double delta = eps*(std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5;\n        return std::abs(r1.x - r2.x) <= delta &&\n            std::abs(r1.y - r2.y) <= delta &&\n            std::abs(r1.x + r1.width - r2.x - r2.width) <= delta &&\n            std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;\n    }\n    double eps;\n};\n\n/** @brief Groups the object candidate rectangles.\n\n@param rectList Input/output vector of rectangles. Output vector includes retained and grouped\nrectangles. (The Python list is not modified in place.)\n@param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a\ngroup of rectangles to retain it.\n@param eps Relative difference between sides of the rectangles to merge them into a group.\n\nThe function is a wrapper for the generic function partition . It clusters all the input rectangles\nusing the rectangle equivalence criteria that combines rectangles with similar sizes and similar\nlocations. The similarity is defined by eps. When eps=0 , no clustering is done at all. If\n\\f$\\texttt{eps}\\rightarrow +\\inf\\f$ , all the rectangles are put in one cluster. Then, the small\nclusters containing less than or equal to groupThreshold rectangles are rejected. In each other\ncluster, the average rectangle is computed and put into the output rectangle list.\n */\nCV_EXPORTS   void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps = 0.2);\n/** @overload */\nCV_EXPORTS_W void groupRectangles(CV_IN_OUT std::vector<Rect>& rectList, CV_OUT std::vector<int>& weights,\n                                  int groupThreshold, double eps = 0.2);\n/** @overload */\nCV_EXPORTS   void groupRectangles(std::vector<Rect>& rectList, int groupThreshold,\n                                  double eps, std::vector<int>* weights, std::vector<double>* levelWeights );\n/** @overload */\nCV_EXPORTS   void groupRectangles(std::vector<Rect>& rectList, std::vector<int>& rejectLevels,\n                                  std::vector<double>& levelWeights, int groupThreshold, double eps = 0.2);\n/** @overload */\nCV_EXPORTS   void groupRectangles_meanshift(std::vector<Rect>& rectList, std::vector<double>& foundWeights,\n                                            std::vector<double>& foundScales,\n                                            double detectThreshold = 0.0, Size winDetSize = Size(64, 128));\n\ntemplate<> CV_EXPORTS void DefaultDeleter<CvHaarClassifierCascade>::operator ()(CvHaarClassifierCascade* obj) const;\n\nenum { CASCADE_DO_CANNY_PRUNING    = 1,\n       CASCADE_SCALE_IMAGE         = 2,\n       CASCADE_FIND_BIGGEST_OBJECT = 4,\n       CASCADE_DO_ROUGH_SEARCH     = 8\n     };\n\nclass CV_EXPORTS_W BaseCascadeClassifier : public Algorithm\n{\npublic:\n    virtual ~BaseCascadeClassifier();\n    virtual bool empty() const = 0;\n    virtual bool load( const String& filename ) = 0;\n    virtual void detectMultiScale( InputArray image,\n                           CV_OUT std::vector<Rect>& objects,\n                           double scaleFactor,\n                           int minNeighbors, int flags,\n                           Size minSize, Size maxSize ) = 0;\n\n    virtual void detectMultiScale( InputArray image,\n                           CV_OUT 
std::vector<Rect>& objects,\n                           CV_OUT std::vector<int>& numDetections,\n                           double scaleFactor,\n                           int minNeighbors, int flags,\n                           Size minSize, Size maxSize ) = 0;\n\n    virtual void detectMultiScale( InputArray image,\n                                   CV_OUT std::vector<Rect>& objects,\n                                   CV_OUT std::vector<int>& rejectLevels,\n                                   CV_OUT std::vector<double>& levelWeights,\n                                   double scaleFactor,\n                                   int minNeighbors, int flags,\n                                   Size minSize, Size maxSize,\n                                   bool outputRejectLevels ) = 0;\n\n    virtual bool isOldFormatCascade() const = 0;\n    virtual Size getOriginalWindowSize() const = 0;\n    virtual int getFeatureType() const = 0;\n    virtual void* getOldCascade() = 0;\n\n    class CV_EXPORTS MaskGenerator\n    {\n    public:\n        virtual ~MaskGenerator() {}\n        virtual Mat generateMask(const Mat& src)=0;\n        virtual void initializeMask(const Mat& /*src*/) { }\n    };\n    virtual void setMaskGenerator(const Ptr<MaskGenerator>& maskGenerator) = 0;\n    virtual Ptr<MaskGenerator> getMaskGenerator() = 0;\n};\n\n/** @brief Cascade classifier class for object detection.\n */\nclass CV_EXPORTS_W CascadeClassifier\n{\npublic:\n    CV_WRAP CascadeClassifier();\n    /** @brief Loads a classifier from a file.\n\n    @param filename Name of the file from which the classifier is loaded.\n     */\n    CV_WRAP CascadeClassifier(const String& filename);\n    ~CascadeClassifier();\n    /** @brief Checks whether the classifier has been loaded.\n    */\n    CV_WRAP bool empty() const;\n    /** @brief Loads a classifier from a file.\n\n    @param filename Name of the file from which the classifier is loaded. The file may contain an old\n    HAAR classifier trained by the haartraining application or a new cascade classifier trained by the\n    traincascade application.\n     */\n    CV_WRAP bool load( const String& filename );\n    /** @brief Reads a classifier from a FileStorage node.\n\n    @note The file may contain a new cascade classifier (trained traincascade application) only.\n     */\n    CV_WRAP bool read( const FileNode& node );\n\n    /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list\n    of rectangles.\n\n    @param image Matrix of the type CV_8U containing an image where objects are detected.\n    @param objects Vector of rectangles where each rectangle contains the detected object, the\n    rectangles may be partially outside the original image.\n    @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.\n    @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have\n    to retain it.\n    @param flags Parameter with the same meaning for an old cascade as in the function\n    cvHaarDetectObjects. It is not used for a new cascade.\n    @param minSize Minimum possible object size. Objects smaller than that are ignored.\n    @param maxSize Maximum possible object size. 
Objects larger than that are ignored.\n\n    The function is parallelized with the TBB library.\n\n    @note\n       -   (Python) A face detection example using cascade classifiers can be found at\n            opencv_source_code/samples/python/facedetect.py\n    */\n    CV_WRAP void detectMultiScale( InputArray image,\n                          CV_OUT std::vector<Rect>& objects,\n                          double scaleFactor = 1.1,\n                          int minNeighbors = 3, int flags = 0,\n                          Size minSize = Size(),\n                          Size maxSize = Size() );\n\n    /** @overload\n    @param image Matrix of the type CV_8U containing an image where objects are detected.\n    @param objects Vector of rectangles where each rectangle contains the detected object, the\n    rectangles may be partially outside the original image.\n    @param numDetections Vector of detection numbers for the corresponding objects. An object's number\n    of detections is the number of neighboring positively classified rectangles that were joined\n    together to form the object.\n    @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.\n    @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have\n    to retain it.\n    @param flags Parameter with the same meaning for an old cascade as in the function\n    cvHaarDetectObjects. It is not used for a new cascade.\n    @param minSize Minimum possible object size. Objects smaller than that are ignored.\n    @param maxSize Maximum possible object size. Objects larger than that are ignored.\n    */\n    CV_WRAP_AS(detectMultiScale2) void detectMultiScale( InputArray image,\n                          CV_OUT std::vector<Rect>& objects,\n                          CV_OUT std::vector<int>& numDetections,\n                          double scaleFactor=1.1,\n                          int minNeighbors=3, int flags=0,\n                          Size minSize=Size(),\n                          Size maxSize=Size() );\n\n    /** @overload\n    if `outputRejectLevels` is `true` returns `rejectLevels` and `levelWeights`\n    */\n    CV_WRAP_AS(detectMultiScale3) void detectMultiScale( InputArray image,\n                                  CV_OUT std::vector<Rect>& objects,\n                                  CV_OUT std::vector<int>& rejectLevels,\n                                  CV_OUT std::vector<double>& levelWeights,\n                                  double scaleFactor = 1.1,\n                                  int minNeighbors = 3, int flags = 0,\n                                  Size minSize = Size(),\n                                  Size maxSize = Size(),\n                                  bool outputRejectLevels = false );\n\n    CV_WRAP bool isOldFormatCascade() const;\n    CV_WRAP Size getOriginalWindowSize() const;\n    CV_WRAP int getFeatureType() const;\n    void* getOldCascade();\n\n    CV_WRAP static bool convert(const String& oldcascade, const String& newcascade);\n\n    void setMaskGenerator(const Ptr<BaseCascadeClassifier::MaskGenerator>& maskGenerator);\n    Ptr<BaseCascadeClassifier::MaskGenerator> getMaskGenerator();\n\n    Ptr<BaseCascadeClassifier> cc;\n};\n\nCV_EXPORTS Ptr<BaseCascadeClassifier::MaskGenerator> createFaceDetectionMaskGenerator();\n\n//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////\n\n//! struct for detection region of interest (ROI)\nstruct DetectionROI\n{\n   //! 
scale(size) of the bounding box\n   double scale;\n   //! set of requested locations to be evaluated\n   std::vector<cv::Point> locations;\n   //! vector that will contain confidence values for each location\n   std::vector<double> confidences;\n};\n\nstruct CV_EXPORTS_W HOGDescriptor\n{\npublic:\n    enum { L2Hys = 0\n         };\n    enum { DEFAULT_NLEVELS = 64\n         };\n\n    CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),\n        cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),\n        histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true),\n        free_coef(-1.f), nlevels(HOGDescriptor::DEFAULT_NLEVELS), signedGradient(false)\n    {}\n\n    CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride,\n                  Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1,\n                  int _histogramNormType=HOGDescriptor::L2Hys,\n                  double _L2HysThreshold=0.2, bool _gammaCorrection=false,\n                  int _nlevels=HOGDescriptor::DEFAULT_NLEVELS, bool _signedGradient=false)\n    : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize),\n    nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma),\n    histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold),\n    gammaCorrection(_gammaCorrection), free_coef(-1.f), nlevels(_nlevels), signedGradient(_signedGradient)\n    {}\n\n    CV_WRAP HOGDescriptor(const String& filename)\n    {\n        load(filename);\n    }\n\n    HOGDescriptor(const HOGDescriptor& d)\n    {\n        d.copyTo(*this);\n    }\n\n    virtual ~HOGDescriptor() {}\n\n    CV_WRAP size_t getDescriptorSize() const;\n    CV_WRAP bool checkDetectorSize() const;\n    CV_WRAP double getWinSigma() const;\n\n    CV_WRAP virtual void setSVMDetector(InputArray _svmdetector);\n\n    virtual bool read(FileNode& fn);\n    virtual void write(FileStorage& fs, const String& objname) const;\n\n    CV_WRAP virtual bool load(const String& filename, const String& objname = String());\n    CV_WRAP virtual void save(const String& filename, const String& objname = String()) const;\n    virtual void copyTo(HOGDescriptor& c) const;\n\n    CV_WRAP virtual void compute(InputArray img,\n                         CV_OUT std::vector<float>& descriptors,\n                         Size winStride = Size(), Size padding = Size(),\n                         const std::vector<Point>& locations = std::vector<Point>()) const;\n\n    //! with found weights output\n    CV_WRAP virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,\n                        CV_OUT std::vector<double>& weights,\n                        double hitThreshold = 0, Size winStride = Size(),\n                        Size padding = Size(),\n                        const std::vector<Point>& searchLocations = std::vector<Point>()) const;\n    //! without found weights output\n    virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,\n                        double hitThreshold = 0, Size winStride = Size(),\n                        Size padding = Size(),\n                        const std::vector<Point>& searchLocations=std::vector<Point>()) const;\n\n    //! 
with result weights output\n    CV_WRAP virtual void detectMultiScale(InputArray img, CV_OUT std::vector<Rect>& foundLocations,\n                                  CV_OUT std::vector<double>& foundWeights, double hitThreshold = 0,\n                                  Size winStride = Size(), Size padding = Size(), double scale = 1.05,\n                                  double finalThreshold = 2.0,bool useMeanshiftGrouping = false) const;\n    //! without found weights output\n    virtual void detectMultiScale(InputArray img, CV_OUT std::vector<Rect>& foundLocations,\n                                  double hitThreshold = 0, Size winStride = Size(),\n                                  Size padding = Size(), double scale = 1.05,\n                                  double finalThreshold = 2.0, bool useMeanshiftGrouping = false) const;\n\n    CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,\n                                 Size paddingTL = Size(), Size paddingBR = Size()) const;\n\n    CV_WRAP static std::vector<float> getDefaultPeopleDetector();\n    CV_WRAP static std::vector<float> getDaimlerPeopleDetector();\n\n    CV_PROP Size winSize;\n    CV_PROP Size blockSize;\n    CV_PROP Size blockStride;\n    CV_PROP Size cellSize;\n    CV_PROP int nbins;\n    CV_PROP int derivAperture;\n    CV_PROP double winSigma;\n    CV_PROP int histogramNormType;\n    CV_PROP double L2HysThreshold;\n    CV_PROP bool gammaCorrection;\n    CV_PROP std::vector<float> svmDetector;\n    UMat oclSvmDetector;\n    float free_coef;\n    CV_PROP int nlevels;\n    CV_PROP bool signedGradient;\n\n\n    //! evaluate specified ROI and return confidence value for each location\n    virtual void detectROI(const cv::Mat& img, const std::vector<cv::Point> &locations,\n                                   CV_OUT std::vector<cv::Point>& foundLocations, CV_OUT std::vector<double>& confidences,\n                                   double hitThreshold = 0, cv::Size winStride = Size(),\n                                   cv::Size padding = Size()) const;\n\n    //! evaluate specified ROI and return confidence value for each location in multiple scales\n    virtual void detectMultiScaleROI(const cv::Mat& img,\n                                                       CV_OUT std::vector<cv::Rect>& foundLocations,\n                                                       std::vector<DetectionROI>& locations,\n                                                       double hitThreshold = 0,\n                                                       int groupThreshold = 0) const;\n\n    //! read/parse Dalal's alt model file\n    void readALTModel(String modelfile);\n    void groupRectangles(std::vector<cv::Rect>& rectList, std::vector<double>& weights, int groupThreshold, double eps) const;\n};\n\n//! @} objdetect\n\n}\n\n#include \"opencv2/objdetect/detection_based_tracker.hpp\"\n\n#ifndef DISABLE_OPENCV_24_COMPATIBILITY\n#include \"opencv2/objdetect/objdetect_c.h\"\n#endif\n\n#endif\n"
  },
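  {
    "path": "docs/examples/objdetect_cpp_sketch.cpp",
    "content": "// Editor's note: hypothetical usage sketch, not part of upstream OpenCV or of the\n// original sources in this repository. It exercises the CascadeClassifier and\n// HOGDescriptor interfaces declared in objdetect.hpp; the cascade XML path and the\n// image path are assumptions.\n#include <iostream>\n#include \"opencv2/objdetect.hpp\"\n#include \"opencv2/imgcodecs.hpp\"\n#include \"opencv2/imgproc.hpp\"\n\nint main()\n{\n    cv::Mat img = cv::imread(\"people.jpg\");\n    if (img.empty()) return 1;\n\n    // Haar cascade: scaleFactor = 1.1 and minNeighbors = 3 are the documented defaults.\n    cv::CascadeClassifier cascade(\"haarcascade_frontalface_alt.xml\");\n    cv::Mat gray;\n    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);\n    cv::equalizeHist(gray, gray);\n    std::vector<cv::Rect> faces;\n    cascade.detectMultiScale(gray, faces, 1.1, 3, cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));\n    std::cout << faces.size() << \" face candidate(s)\\n\";\n\n    // HOG people detector with the built-in Dalal-Triggs coefficients.\n    cv::HOGDescriptor hog;\n    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());\n    std::vector<cv::Rect> people;\n    std::vector<double> weights;\n    hog.detectMultiScale(img, people, weights, 0 /*hitThreshold*/, cv::Size(8, 8),\n                         cv::Size(32, 32), 1.05 /*scale*/, 2.0 /*finalThreshold*/);\n    std::cout << people.size() << \" person candidate(s)\\n\";\n    return 0;\n}\n"
  },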
  {
    "path": "src/3rdparty/opencv/include/opencv2/opencv.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_ALL_HPP__\n#define __OPENCV_ALL_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n#include \"opencv2/photo.hpp\"\n#include \"opencv2/video.hpp\"\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/objdetect.hpp\"\n#include \"opencv2/calib3d.hpp\"\n#include \"opencv2/imgcodecs.hpp\"\n#include \"opencv2/videoio.hpp\"\n#include \"opencv2/highgui.hpp\"\n#include \"opencv2/ml.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/opencv_modules.hpp",
    "content": "/*\n *      ** File generated automatically, do not modify **\n *\n * This file defines the list of modules available in current build configuration\n *\n *\n*/\n\n#define HAVE_OPENCV_ARUCO\n#define HAVE_OPENCV_BGSEGM\n#define HAVE_OPENCV_BIOINSPIRED\n#define HAVE_OPENCV_CALIB3D\n#define HAVE_OPENCV_CCALIB\n#define HAVE_OPENCV_CORE\n#define HAVE_OPENCV_DATASETS\n#define HAVE_OPENCV_DNN\n#define HAVE_OPENCV_DPM\n#define HAVE_OPENCV_FACE\n#define HAVE_OPENCV_FEATURES2D\n#define HAVE_OPENCV_FLANN\n#define HAVE_OPENCV_FUZZY\n#define HAVE_OPENCV_HIGHGUI\n#define HAVE_OPENCV_IMGCODECS\n#define HAVE_OPENCV_IMGPROC\n#define HAVE_OPENCV_LINE_DESCRIPTOR\n#define HAVE_OPENCV_ML\n#define HAVE_OPENCV_OBJDETECT\n#define HAVE_OPENCV_OPTFLOW\n#define HAVE_OPENCV_PHOTO\n#define HAVE_OPENCV_PLOT\n#define HAVE_OPENCV_REG\n#define HAVE_OPENCV_RGBD\n#define HAVE_OPENCV_SALIENCY\n#define HAVE_OPENCV_SHAPE\n#define HAVE_OPENCV_STEREO\n#define HAVE_OPENCV_STITCHING\n#define HAVE_OPENCV_STRUCTURED_LIGHT\n#define HAVE_OPENCV_SUPERRES\n#define HAVE_OPENCV_SURFACE_MATCHING\n#define HAVE_OPENCV_TEXT\n#define HAVE_OPENCV_TRACKING\n#define HAVE_OPENCV_VIDEO\n#define HAVE_OPENCV_VIDEOIO\n#define HAVE_OPENCV_VIDEOSTAB\n#define HAVE_OPENCV_XFEATURES2D\n#define HAVE_OPENCV_XIMGPROC\n#define HAVE_OPENCV_XOBJDETECT\n#define HAVE_OPENCV_XPHOTO\n\n\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/optflow/motempl.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_OPTFLOW_MOTEMPL_HPP__\n#define __OPENCV_OPTFLOW_MOTEMPL_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\nnamespace motempl\n{\n\n//! @addtogroup optflow\n//! 
@{\n\n/** @brief Updates the motion history image by a moving silhouette.\n\n@param silhouette Silhouette mask that has non-zero pixels where the motion occurs.\n@param mhi Motion history image that is updated by the function (single-channel, 32-bit\nfloating-point).\n@param timestamp Current time in milliseconds or other units.\n@param duration Maximal duration of the motion track in the same units as timestamp .\n\nThe function updates the motion history image as follows:\n\n\\f[\\texttt{mhi} (x,y)= \\forkthree{\\texttt{timestamp}}{if \\(\\texttt{silhouette}(x,y) \\ne 0\\)}{0}{if \\(\\texttt{silhouette}(x,y) = 0\\) and \\(\\texttt{mhi} < (\\texttt{timestamp} - \\texttt{duration})\\)}{\\texttt{mhi}(x,y)}{otherwise}\\f]\n\nThat is, MHI pixels where the motion occurs are set to the current timestamp , while the pixels\nwhere the motion happened last time a long time ago are cleared.\n\nThe function, together with calcMotionGradient and calcGlobalOrientation , implements a motion\ntemplates technique described in @cite Davis97 and @cite Bradski00 .\n */\nCV_EXPORTS_W void updateMotionHistory( InputArray silhouette, InputOutputArray mhi,\n                                       double timestamp, double duration );\n\n/** @brief Calculates a gradient orientation of a motion history image.\n\n@param mhi Motion history single-channel floating-point image.\n@param mask Output mask image that has the type CV_8UC1 and the same size as mhi . Its non-zero\nelements mark pixels where the motion gradient data is correct.\n@param orientation Output motion gradient orientation image that has the same type and the same\nsize as mhi . Each pixel of the image is a motion orientation, from 0 to 360 degrees.\n@param delta1 Minimal (or maximal) allowed difference between mhi values within a pixel\nneighborhood.\n@param delta2 Maximal (or minimal) allowed difference between mhi values within a pixel\nneighborhood. That is, the function finds the minimum ( \\f$m(x,y)\\f$ ) and maximum ( \\f$M(x,y)\\f$ ) mhi\nvalues over \\f$3 \\times 3\\f$ neighborhood of each pixel and marks the motion orientation at \\f$(x, y)\\f$\nas valid only if\n\\f[\\min ( \\texttt{delta1}  ,  \\texttt{delta2}  )  \\le  M(x,y)-m(x,y)  \\le   \\max ( \\texttt{delta1}  , \\texttt{delta2} ).\\f]\n@param apertureSize Aperture size of the Sobel operator.\n\nThe function calculates a gradient orientation at each pixel \\f$(x, y)\\f$ as:\n\n\\f[\\texttt{orientation} (x,y)= \\arctan{\\frac{d\\texttt{mhi}/dy}{d\\texttt{mhi}/dx}}\\f]\n\nIn fact, fastAtan2 and phase are used so that the computed angle is measured in degrees and covers\nthe full range 0..360. Also, the mask is filled to indicate pixels where the computed angle is\nvalid.\n\n@note\n   -   (Python) An example on how to perform a motion template technique can be found at\n        opencv_source_code/samples/python2/motempl.py\n */\nCV_EXPORTS_W void calcMotionGradient( InputArray mhi, OutputArray mask, OutputArray orientation,\n                                      double delta1, double delta2, int apertureSize = 3 );\n\n/** @brief Calculates a global motion orientation in a selected region.\n\n@param orientation Motion gradient orientation image calculated by the function calcMotionGradient\n@param mask Mask image. 
It may be a conjunction of a valid gradient mask, also calculated by\ncalcMotionGradient , and the mask of a region whose direction needs to be calculated.\n@param mhi Motion history image calculated by updateMotionHistory .\n@param timestamp Timestamp passed to updateMotionHistory .\n@param duration Maximum duration of a motion track in milliseconds, passed to updateMotionHistory\n\nThe function calculates an average motion direction in the selected region and returns the angle\nbetween 0 degrees and 360 degrees. The average direction is computed from the weighted orientation\nhistogram, where a recent motion has a larger weight and the motion occurred in the past has a\nsmaller weight, as recorded in mhi .\n */\nCV_EXPORTS_W double calcGlobalOrientation( InputArray orientation, InputArray mask, InputArray mhi,\n                                           double timestamp, double duration );\n\n/** @brief Splits a motion history image into a few parts corresponding to separate independent motions (for\nexample, left hand, right hand).\n\n@param mhi Motion history image.\n@param segmask Image where the found mask should be stored, single-channel, 32-bit floating-point.\n@param boundingRects Vector containing ROIs of motion connected components.\n@param timestamp Current time in milliseconds or other units.\n@param segThresh Segmentation threshold that is recommended to be equal to the interval between\nmotion history \"steps\" or greater.\n\nThe function finds all of the motion segments and marks them in segmask with individual values\n(1,2,...). It also computes a vector with ROIs of motion connected components. After that the motion\ndirection for every component can be calculated with calcGlobalOrientation using the extracted mask\nof the particular component.\n */\nCV_EXPORTS_W void segmentMotion( InputArray mhi, OutputArray segmask,\n                                 CV_OUT std::vector<Rect>& boundingRects,\n                                 double timestamp, double segThresh );\n                                 \n\n//! @}\n\n}\n}                                 \n                                 \n#endif\n"
  },
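  {
    "path": "docs/examples/motempl_sketch.cpp",
    "content": "// Editor's note: hypothetical usage sketch, not part of upstream OpenCV or of the\n// original sources in this repository. It chains the motion-template functions\n// declared in motempl.hpp (updateMotionHistory -> calcMotionGradient ->\n// calcGlobalOrientation) on frames from the default camera; the duration and\n// threshold constants are assumptions.\n#include <iostream>\n#include \"opencv2/optflow.hpp\" // also pulls in opencv2/optflow/motempl.hpp\n#include \"opencv2/videoio.hpp\"\n#include \"opencv2/imgproc.hpp\"\n\nint main()\n{\n    const double MHI_DURATION = 1.0; // seconds of motion history kept (assumption)\n    cv::VideoCapture cap(0);\n    if (!cap.isOpened()) return 1;\n\n    cv::Mat frame, gray, prev, silh, mhi, mask, orient;\n    while (cap.read(frame))\n    {\n        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);\n        if (prev.empty())\n        {\n            prev = gray.clone();\n            mhi = cv::Mat::zeros(gray.size(), CV_32FC1); // 32-bit float MHI, as documented\n            continue;\n        }\n        cv::absdiff(gray, prev, silh);                        // moving silhouette\n        cv::threshold(silh, silh, 30, 255, cv::THRESH_BINARY);\n        double t = (double)cv::getTickCount() / cv::getTickFrequency(); // timestamp in seconds\n        cv::motempl::updateMotionHistory(silh, mhi, t, MHI_DURATION);\n        cv::motempl::calcMotionGradient(mhi, mask, orient, 0.5, 0.05, 3);\n        double angle = cv::motempl::calcGlobalOrientation(orient, mask, mhi, t, MHI_DURATION);\n        std::cout << \"global motion direction: \" << angle << \" deg\\n\";\n        prev = gray.clone();\n    }\n    return 0;\n}\n"
  },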
  {
    "path": "src/3rdparty/opencv/include/opencv2/optflow.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_OPTFLOW_HPP__\n#define __OPENCV_OPTFLOW_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/video.hpp\"\n\n/**\n@defgroup optflow Optical Flow Algorithms\n\nDense optical flow algorithms compute motion for each point:\n\n- cv::optflow::calcOpticalFlowSF\n- cv::optflow::createOptFlow_DeepFlow\n\nMotion templates is alternative technique for detecting motion and computing its direction.\nSee samples/motempl.py.\n\n- cv::motempl::updateMotionHistory\n- cv::motempl::calcMotionGradient\n- cv::motempl::calcGlobalOrientation\n- cv::motempl::segmentMotion\n\nFunctions reading and writing .flo files in \"Middlebury\" format, see: <http://vision.middlebury.edu/flow/code/flow-code/README.txt>\n\n- cv::optflow::readOpticalFlow\n- cv::optflow::writeOpticalFlow\n\n */\n\nnamespace cv\n{\nnamespace optflow\n{\n    \n//! @addtogroup optflow\n//! 
@{\n\n/** @overload */\nCV_EXPORTS_W void calcOpticalFlowSF( InputArray from, InputArray to, OutputArray flow,\n                                     int layers, int averaging_block_size, int max_flow);\n\n/** @brief Calculates an optical flow using the \"SimpleFlow\" algorithm.\n\n@param from First 8-bit 3-channel image.\n@param to Second 8-bit 3-channel image of the same size as from\n@param flow computed flow image that has the same size as from and type CV_32FC2\n@param layers Number of layers\n@param averaging_block_size Size of the block over which we sum when calculating the cost function\nfor a pixel\n@param max_flow maximal flow that we search at each level\n@param sigma_dist vector smooth spatial sigma parameter\n@param sigma_color vector smooth color sigma parameter\n@param postprocess_window window size for postprocess cross bilateral filter\n@param sigma_dist_fix spatial sigma for postprocess cross bilateral filter\n@param sigma_color_fix color sigma for postprocess cross bilateral filter\n@param occ_thr threshold for detecting occlusions\n@param upscale_averaging_radius window size for bilateral upscale operation\n@param upscale_sigma_dist spatial sigma for bilateral upscale operation\n@param upscale_sigma_color color sigma for bilateral upscale operation\n@param speed_up_thr threshold to detect points with irregular flow, where the flow should be\nrecalculated after upscale\n\nSee @cite Tao2012 and the project site: <http://graphics.berkeley.edu/papers/Tao-SAN-2012-05/>.\n\n@note\n   -   An example using the simpleFlow algorithm can be found at samples/simpleflow_demo.cpp\n */\nCV_EXPORTS_W void calcOpticalFlowSF( InputArray from, InputArray to, OutputArray flow, int layers,\n                                     int averaging_block_size, int max_flow,\n                                     double sigma_dist, double sigma_color, int postprocess_window,\n                                     double sigma_dist_fix, double sigma_color_fix, double occ_thr,\n                                     int upscale_averaging_radius, double upscale_sigma_dist,\n                                     double upscale_sigma_color, double speed_up_thr );\n\n/** @brief Fast dense optical flow based on PyrLK sparse matches interpolation.\n\n@param from first 8-bit 3-channel or 1-channel image.\n@param to  second 8-bit 3-channel or 1-channel image of the same size as from\n@param flow computed flow image that has the same size as from and CV_32FC2 type\n@param grid_step stride used in sparse match computation. Lower values usually\n       result in higher quality but slow down the algorithm.\n@param k number of nearest-neighbor matches considered, when fitting a locally affine\n       model. Lower values can make the algorithm noticeably faster at the cost of\n       some quality degradation.\n@param sigma parameter defining how fast the weights decrease in the locally-weighted affine\n       fitting. 
Higher values can help preserve fine details, lower values can help to get rid\n       of the noise in the output flow.\n@param use_post_proc defines whether the ximgproc::fastGlobalSmootherFilter() is used\n       for post-processing after interpolation\n@param fgs_lambda see the respective parameter of the ximgproc::fastGlobalSmootherFilter()\n@param fgs_sigma  see the respective parameter of the ximgproc::fastGlobalSmootherFilter()\n */\nCV_EXPORTS_W void calcOpticalFlowSparseToDense ( InputArray from, InputArray to, OutputArray flow,\n                                                 int grid_step = 8, int k = 128, float sigma = 0.05f,\n                                                 bool use_post_proc = true, float fgs_lambda = 500.0f,\n                                                 float fgs_sigma = 1.5f );\n\n/** @brief Read a .flo file\n\n@param path Path to the file to be loaded\n\nThe function readOpticalFlow loads a flow field from a file and returns it as a single matrix.\nResulting Mat has a type CV_32FC2 - floating-point, 2-channel. First channel corresponds to the\nflow in the horizontal direction (u), second - vertical (v).\n */\nCV_EXPORTS_W Mat readOpticalFlow( const String& path );\n/** @brief Write a .flo to disk\n\n@param path Path to the file to be written\n@param flow Flow field to be stored\n\nThe function stores a flow field in a file, returns true on success, false otherwise.\nThe flow field must be a 2-channel, floating-point matrix (CV_32FC2). First channel corresponds\nto the flow in the horizontal direction (u), second - vertical (v).\n */\nCV_EXPORTS_W bool writeOpticalFlow( const String& path, InputArray flow );\n\n\n/** @brief DeepFlow optical flow algorithm implementation.\n\nThe class implements the DeepFlow optical flow algorithm described in @cite Weinzaepfel2013 . See\nalso <http://lear.inrialpes.fr/src/deepmatching/> .\nParameters - class fields - that may be modified after creating a class instance:\n-   member float alpha\nSmoothness assumption weight\n-   member float delta\nColor constancy assumption weight\n-   member float gamma\nGradient constancy weight\n-   member float sigma\nGaussian smoothing parameter\n-   member int minSize\nMinimal dimension of an image in the pyramid (next, smaller images in the pyramid are generated\nuntil one of the dimensions reaches this size)\n-   member float downscaleFactor\nScaling factor in the image pyramid (must be \\< 1)\n-   member int fixedPointIterations\nHow many iterations on each level of the pyramid\n-   member int sorIterations\nIterations of Successive Over-Relaxation (solver)\n-   member float omega\nRelaxation factor in SOR\n */\nCV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_DeepFlow();\n\n//! Additional interface to the SimpleFlow algorithm - calcOpticalFlowSF()\nCV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_SimpleFlow();\n\n//! Additional interface to the Farneback's algorithm - calcOpticalFlowFarneback()\nCV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_Farneback();\n\n//! Additional interface to the SparseToDenseFlow algorithm - calcOpticalFlowSparseToDense()\nCV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_SparseToDense();\n\n//! @}\n\n} //optflow\n}\n\n#include \"opencv2/optflow/motempl.hpp\"\n\n#endif\n"
  },
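  {
    "path": "docs/examples/optflow_sketch.cpp",
    "content": "// Editor's note: hypothetical usage sketch, not part of upstream OpenCV or of the\n// original sources in this repository. It runs the SimpleFlow wrapper declared in\n// optflow.hpp and round-trips the result through the Middlebury .flo reader and\n// writer; the frame file names are assumptions.\n#include \"opencv2/optflow.hpp\"\n#include \"opencv2/imgcodecs.hpp\"\n\nint main()\n{\n    cv::Mat from = cv::imread(\"frame0.png\"); // 8-bit 3-channel, as the docs require\n    cv::Mat to   = cv::imread(\"frame1.png\");\n    if (from.empty() || to.empty()) return 1;\n\n    // createOptFlow_SimpleFlow() wraps calcOpticalFlowSF() with default parameters.\n    cv::Ptr<cv::DenseOpticalFlow> algo = cv::optflow::createOptFlow_SimpleFlow();\n    cv::Mat flow; // filled as CV_32FC2: channel 0 = u, channel 1 = v\n    algo->calc(from, to, flow);\n\n    // .flo round trip in the \"Middlebury\" format mentioned above.\n    if (!cv::optflow::writeOpticalFlow(\"out.flo\", flow)) return 1;\n    cv::Mat reread = cv::optflow::readOpticalFlow(\"out.flo\");\n    return reread.empty() ? 1 : 0;\n}\n"
  },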
  {
    "path": "src/3rdparty/opencv/include/opencv2/photo/cuda.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_PHOTO_CUDA_HPP__\n#define __OPENCV_PHOTO_CUDA_HPP__\n\n#include \"opencv2/core/cuda.hpp\"\n\nnamespace cv { namespace cuda {\n\n//! @addtogroup photo_denoise\n//! @{\n\n/** @brief Performs pure non local means denoising without any simplification, and thus it is not fast.\n\n@param src Source image. Supports only CV_8UC1, CV_8UC2 and CV_8UC3.\n@param dst Destination image.\n@param h Filter sigma regulating filter strength for color.\n@param search_window Size of search window.\n@param block_size Size of block used for computing weights.\n@param borderMode Border type. See borderInterpolate for details. 
BORDER_REFLECT101 ,\nBORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.\n@param stream Stream for the asynchronous version.\n\n@sa\n   fastNlMeansDenoising\n */\nCV_EXPORTS void nonLocalMeans(InputArray src, OutputArray dst,\n                              float h,\n                              int search_window = 21,\n                              int block_size = 7,\n                              int borderMode = BORDER_DEFAULT,\n                              Stream& stream = Stream::Null());\n\n/** @brief Perform image denoising using Non-local Means Denoising algorithm\n<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising> with several computational\noptimizations. Noise expected to be a gaussian white noise\n\n@param src Input 8-bit 1-channel, 2-channel or 3-channel image.\n@param dst Output image with the same size and type as src .\n@param h Parameter regulating filter strength. Big h value perfectly removes noise but also\nremoves image details, smaller h value preserves details but also preserves some noise\n@param search_window Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affect performance linearly: greater search_window - greater\ndenoising time. Recommended value 21 pixels\n@param block_size Size in pixels of the template patch that is used to compute weights. Should be\nodd. Recommended value 7 pixels\n@param stream Stream for the asynchronous invocations.\n\nThis function expected to be applied to grayscale images. For colored images look at\nFastNonLocalMeansDenoising::labMethod.\n\n@sa\n   fastNlMeansDenoising\n */\nCV_EXPORTS void fastNlMeansDenoising(InputArray src, OutputArray dst,\n                                     float h,\n                                     int search_window = 21,\n                                     int block_size = 7,\n                                     Stream& stream = Stream::Null());\n\n/** @brief Modification of fastNlMeansDenoising function for colored images\n\n@param src Input 8-bit 3-channel image.\n@param dst Output image with the same size and type as src .\n@param h_luminance Parameter regulating filter strength. Big h value perfectly removes noise but\nalso removes image details, smaller h value preserves details but also preserves some noise\n@param photo_render float The same as h but for color components. For most images value equals 10 will be\nenough to remove colored noise and do not distort colors\n@param search_window Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affect performance linearly: greater search_window - greater\ndenoising time. Recommended value 21 pixels\n@param block_size Size in pixels of the template patch that is used to compute weights. Should be\nodd. Recommended value 7 pixels\n@param stream Stream for the asynchronous invocations.\n\nThe function converts image to CIELAB colorspace and then separately denoise L and AB components\nwith given h parameters using FastNonLocalMeansDenoising::simpleMethod function.\n\n@sa\n   fastNlMeansDenoisingColored\n */\nCV_EXPORTS void fastNlMeansDenoisingColored(InputArray src, OutputArray dst,\n                                            float h_luminance, float photo_render,\n                                            int search_window = 21,\n                                            int block_size = 7,\n                                            Stream& stream = Stream::Null());\n\n//! 
@} photo\n\n}} // namespace cv { namespace cuda {\n\n#endif /* __OPENCV_PHOTO_CUDA_HPP__ */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/photo/photo.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/photo.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/photo/photo_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_PHOTO_C_H__\n#define __OPENCV_PHOTO_C_H__\n\n#include \"opencv2/core/core_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/** @addtogroup photo_c\n  @{\n  */\n\n/* Inpainting algorithms */\nenum InpaintingModes\n{\n    CV_INPAINT_NS      =0,\n    CV_INPAINT_TELEA   =1\n};\n\n\n/* Inpaints the selected region in the image */\nCVAPI(void) cvInpaint( const CvArr* src, const CvArr* inpaint_mask,\n                       CvArr* dst, double inpaintRange, int flags );\n\n/** @} */\n\n#ifdef __cplusplus\n} //extern \"C\"\n#endif\n\n#endif //__OPENCV_PHOTO_C_H__\n"
  },
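  {
    "path": "docs/examples/inpaint_c_api_example.cpp",
    "content": "// Hypothetical usage sketch, not part of OpenCV: exercises the legacy C inpainting API declared\n// in photo_c.h. Assumes the highgui C API (highgui_c.h) is available for image I/O; file names\n// are placeholders.\n#include <opencv2/photo/photo_c.h>\n#include <opencv2/highgui/highgui_c.h>\n\nint main()\n{\n    // Load the damaged photo and a user-drawn mask (non-zero = pixels to repair).\n    IplImage* img  = cvLoadImage(\"damaged.png\", CV_LOAD_IMAGE_COLOR);\n    IplImage* mask = cvLoadImage(\"mask.png\", CV_LOAD_IMAGE_GRAYSCALE);\n    if (!img || !mask) return 1;\n\n    IplImage* dst = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);\n\n    // Telea's method with a 3-pixel inpainting radius; CV_INPAINT_NS is the alternative.\n    cvInpaint(img, mask, dst, 3.0, CV_INPAINT_TELEA);\n\n    cvSaveImage(\"restored.png\", dst);\n    cvReleaseImage(&img);\n    cvReleaseImage(&mask);\n    cvReleaseImage(&dst);\n    return 0;\n}\n"
  },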
  {
    "path": "src/3rdparty/opencv/include/opencv2/photo.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_PHOTO_HPP__\n#define __OPENCV_PHOTO_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n\n/**\n@defgroup photo Computational Photography\n@{\n    @defgroup photo_denoise Denoising\n    @defgroup photo_hdr HDR imaging\n\nThis section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment,\ncamera calibration with multiple exposures and exposure fusion.\n\n    @defgroup photo_clone Seamless Cloning\n    @defgroup photo_render Non-Photorealistic Rendering\n    @defgroup photo_c C API\n@}\n  */\n\nnamespace cv\n{\n\n//! @addtogroup photo\n//! @{\n\n//! the inpainting algorithm\nenum\n{\n    INPAINT_NS    = 0, // Navier-Stokes algorithm\n    INPAINT_TELEA = 1 // A. Telea algorithm\n};\n\nenum\n{\n    NORMAL_CLONE = 1,\n    MIXED_CLONE  = 2,\n    MONOCHROME_TRANSFER = 3\n};\n\nenum\n{\n    RECURS_FILTER = 1,\n    NORMCONV_FILTER = 2\n};\n\n/** @brief Restores the selected region in an image using the region neighborhood.\n\n@param src Input 8-bit 1-channel or 3-channel image.\n@param inpaintMask Inpainting mask, 8-bit 1-channel image. 
Non-zero pixels indicate the area that\nneeds to be inpainted.\n@param dst Output image with the same size and type as src .\n@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered\nby the algorithm.\n@param flags Inpainting method that could be one of the following:\n-   **INPAINT_NS** Navier-Stokes based method [Navier01]\n-   **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04 .\n\nThe function reconstructs the selected image area from the pixels near the area boundary. The\nfunction may be used to remove dust and scratches from a scanned photo, or to remove undesirable\nobjects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details.\n\n@note\n   -   An example using the inpainting technique can be found at\n        opencv_source_code/samples/cpp/inpaint.cpp\n    -   (Python) An example using the inpainting technique can be found at\n        opencv_source_code/samples/python/inpaint.py\n */\nCV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,\n        OutputArray dst, double inpaintRadius, int flags );\n\n//! @addtogroup photo_denoise\n//! @{\n\n/** @brief Perform image denoising using the Non-local Means Denoising algorithm\n<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational\noptimizations. The noise is expected to be Gaussian white noise.\n\n@param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.\n@param dst Output image with the same size and type as src .\n@param templateWindowSize Size in pixels of the template patch that is used to compute weights.\nShould be odd. Recommended value 7 pixels\n@param searchWindowSize Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater\ndenoising time. Recommended value 21 pixels\n@param h Parameter regulating filter strength. A big h value perfectly removes noise but also\nremoves image details; a smaller h value preserves details but also preserves some noise\n\nThis function is expected to be applied to grayscale images. For colored images look at\nfastNlMeansDenoisingColored. Advanced usage of these functions can be manual denoising of a colored\nimage in different colorspaces. Such an approach is used in fastNlMeansDenoisingColored by converting\nthe image to CIELAB colorspace and then separately denoising the L and AB components with different h\nparameters.\n */\nCV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,\n        int templateWindowSize = 7, int searchWindowSize = 21);\n\n/** @brief Perform image denoising using the Non-local Means Denoising algorithm\n<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational\noptimizations. The noise is expected to be Gaussian white noise.\n\n@param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,\n2-channel, 3-channel or 4-channel image.\n@param dst Output image with the same size and type as src .\n@param templateWindowSize Size in pixels of the template patch that is used to compute weights.\nShould be odd. Recommended value 7 pixels\n@param searchWindowSize Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater\ndenoising time. Recommended value 21 pixels\n@param h Array of parameters regulating filter strength, either one\nparameter applied to all channels or one per channel in dst.
A big h value\nperfectly removes noise but also removes image details; a smaller h\nvalue preserves details but also preserves some noise\n@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1\n\nThis function is expected to be applied to grayscale images. For colored images look at\nfastNlMeansDenoisingColored. Advanced usage of these functions can be manual denoising of a colored\nimage in different colorspaces. Such an approach is used in fastNlMeansDenoisingColored by converting\nthe image to CIELAB colorspace and then separately denoising the L and AB components with different h\nparameters.\n */\nCV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst,\n                                        const std::vector<float>& h,\n                                        int templateWindowSize = 7, int searchWindowSize = 21,\n                                        int normType = NORM_L2);\n\n/** @brief Modification of fastNlMeansDenoising function for colored images\n\n@param src Input 8-bit 3-channel image.\n@param dst Output image with the same size and type as src .\n@param templateWindowSize Size in pixels of the template patch that is used to compute weights.\nShould be odd. Recommended value 7 pixels\n@param searchWindowSize Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater\ndenoising time. Recommended value 21 pixels\n@param h Parameter regulating filter strength for luminance component. A bigger h value perfectly\nremoves noise but also removes image details; a smaller h value preserves details but also preserves\nsome noise\n@param hColor The same as h but for color components. For most images a value of 10\nwill be enough to remove colored noise and not distort colors\n\nThe function converts the image to CIELAB colorspace and then separately denoises the L and AB components\nwith the given h parameters using the fastNlMeansDenoising function.\n */\nCV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,\n        float h = 3, float hColor = 3,\n        int templateWindowSize = 7, int searchWindowSize = 21);\n\n/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been\ncaptured in a small period of time, for example video. This version of the function is for grayscale\nimages or for manual manipulation with colorspaces. For more details see\n<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>\n\n@param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or\n4-channel image sequence. All images should have the same type and\nsize.\n@param imgToDenoiseIndex Index of target image to denoise in srcImgs sequence\n@param temporalWindowSize Number of surrounding images to use for target image denoising. Should\nbe odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to\nimgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise\nsrcImgs[imgToDenoiseIndex] image.\n@param dst Output image with the same size and type as srcImgs images.\n@param templateWindowSize Size in pixels of the template patch that is used to compute weights.\nShould be odd. Recommended value 7 pixels\n@param searchWindowSize Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater\ndenoising time.
Recommended value 21 pixels\n@param h Parameter regulating filter strength. A bigger h value\nperfectly removes noise but also removes image details; a smaller h\nvalue preserves details but also preserves some noise\n */\nCV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,\n        int imgToDenoiseIndex, int temporalWindowSize,\n        float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);\n\n/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been\ncaptured in a small period of time, for example video. This version of the function is for grayscale\nimages or for manual manipulation with colorspaces. For more details see\n<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>\n\n@param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel,\n2-channel, 3-channel or 4-channel image sequence. All images should\nhave the same type and size.\n@param imgToDenoiseIndex Index of target image to denoise in srcImgs sequence\n@param temporalWindowSize Number of surrounding images to use for target image denoising. Should\nbe odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to\nimgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise\nsrcImgs[imgToDenoiseIndex] image.\n@param dst Output image with the same size and type as srcImgs images.\n@param templateWindowSize Size in pixels of the template patch that is used to compute weights.\nShould be odd. Recommended value 7 pixels\n@param searchWindowSize Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater\ndenoising time. Recommended value 21 pixels\n@param h Array of parameters regulating filter strength, either one\nparameter applied to all channels or one per channel in dst. A big h value\nperfectly removes noise but also removes image details; a smaller h\nvalue preserves details but also preserves some noise\n@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1\n */\nCV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,\n                                             int imgToDenoiseIndex, int temporalWindowSize,\n                                             const std::vector<float>& h,\n                                             int templateWindowSize = 7, int searchWindowSize = 21,\n                                             int normType = NORM_L2);\n\n/** @brief Modification of fastNlMeansDenoisingMulti function for colored image sequences\n\n@param srcImgs Input 8-bit 3-channel image sequence. All images should have the same type and\nsize.\n@param imgToDenoiseIndex Index of target image to denoise in srcImgs sequence\n@param temporalWindowSize Number of surrounding images to use for target image denoising. Should\nbe odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to\nimgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise\nsrcImgs[imgToDenoiseIndex] image.\n@param dst Output image with the same size and type as srcImgs images.\n@param templateWindowSize Size in pixels of the template patch that is used to compute weights.\nShould be odd. Recommended value 7 pixels\n@param searchWindowSize Size in pixels of the window that is used to compute weighted average for\ngiven pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater\ndenoising time.
Recommended value 21 pixels\n@param h Parameter regulating filter strength for luminance component. A bigger h value perfectly\nremoves noise but also removes image details; a smaller h value preserves details but also preserves\nsome noise.\n@param hColor The same as h but for color components.\n\nThe function converts images to CIELAB colorspace and then separately denoises the L and AB components\nwith the given h parameters using the fastNlMeansDenoisingMulti function.\n */\nCV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst,\n        int imgToDenoiseIndex, int temporalWindowSize,\n        float h = 3, float hColor = 3,\n        int templateWindowSize = 7, int searchWindowSize = 21);\n\n/** @brief The primal-dual algorithm is an algorithm for solving special types of variational problems (that is,\nfinding a function to minimize some functional). As image denoising, in particular, may be seen\nas a variational problem, the primal-dual algorithm can then be used to perform denoising, and this is\nexactly what is implemented.\n\nIt should be noted that this implementation was taken from the July 2013 blog entry\n@cite MA13 , which also contained (slightly more general) ready-to-use source code in Python.\nSubsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky at the end\nof July 2013 and finally it was slightly adapted by later authors.\n\nAlthough the thorough discussion and justification of the algorithm involved may be found in\n@cite ChambolleEtAl, it might make sense to skim over it here, following @cite MA13 . To begin\nwith, we consider the 1-byte gray-level images as the functions from the rectangular domain of\npixels (it may be seen as set\n\\f$\\left\\{(x,y)\\in\\mathbb{N}\\times\\mathbb{N}\\mid 1\\leq x\\leq n,\\;1\\leq y\\leq m\\right\\}\\f$ for some\n\\f$m,\\;n\\in\\mathbb{N}\\f$) into \\f$\\{0,1,\\dots,255\\}\\f$. We shall denote the noised images as \\f$f_i\\f$ and with\nthis view, given some image \\f$x\\f$ of the same size, we may measure how bad it is by the formula\n\n\\f[\\left\\|\\left\\|\\nabla x\\right\\|\\right\\| + \\lambda\\sum_i\\left\\|\\left\\|x-f_i\\right\\|\\right\\|\\f]\n\n\\f$\\|\\|\\cdot\\|\\|\\f$ here denotes \\f$L_2\\f$-norm and as you see, the first addend states that we want our\nimage to be smooth (ideally, having zero gradient, thus being constant) and the second states that\nwe want our result to be close to the observations we've got. If we treat \\f$x\\f$ as a function, this is\nexactly the functional that we seek to minimize, and here the primal-dual algorithm comes into play.\n\n@param observations This array should contain one or more noised versions of the image that is to\nbe restored.\n@param result Here the denoised image will be stored. There is no need to do pre-allocation of\nstorage space, as it will be automatically allocated, if necessary.\n@param lambda Corresponds to \\f$\\lambda\\f$ in the formulas above. As it is enlarged, the detailed\n(but maybe more noised) images are treated more favorably than smooth (blurred) ones. Roughly\nspeaking, as it becomes smaller, the result will be more blurred, but more severe outliers will be\nremoved.\n@param niters Number of iterations that the algorithm will run.
Of course, more iterations give\nbetter results, but it is hard to quantitatively refine this statement, so just use the default and\nincrease it if the results are poor.\n */\nCV_EXPORTS_W void denoise_TVL1(const std::vector<Mat>& observations,Mat& result, double lambda=1.0, int niters=30);\n\n//! @} photo_denoise\n\n//! @addtogroup photo_hdr\n//! @{\n\nenum { LDR_SIZE = 256 };\n\n/** @brief Base class for tonemapping algorithms - tools that are used to map an HDR image to the 8-bit range.\n */\nclass CV_EXPORTS_W Tonemap : public Algorithm\n{\npublic:\n    /** @brief Tonemaps image\n\n    @param src source image - 32-bit 3-channel Mat\n    @param dst destination image - 32-bit 3-channel Mat with values in [0, 1] range\n     */\n    CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0;\n\n    CV_WRAP virtual float getGamma() const = 0;\n    CV_WRAP virtual void setGamma(float gamma) = 0;\n};\n\n/** @brief Creates a simple linear mapper with gamma correction\n\n@param gamma positive value for gamma correction. A gamma value of 1.0 implies no correction, gamma\nequal to 2.2f is suitable for most displays.\nGenerally gamma \\> 1 brightens the image and gamma \\< 1 darkens it.\n */\nCV_EXPORTS_W Ptr<Tonemap> createTonemap(float gamma = 1.0f);\n\n/** @brief Adaptive logarithmic mapping is a fast global tonemapping algorithm that scales the image in\nthe logarithmic domain.\n\nSince it's a global operator, the same function is applied to all pixels; it is controlled by the\nbias parameter.\n\nOptional saturation enhancement is possible as described in @cite FL02 .\n\nFor more information see @cite DM03 .\n */\nclass CV_EXPORTS_W TonemapDrago : public Tonemap\n{\npublic:\n\n    CV_WRAP virtual float getSaturation() const = 0;\n    CV_WRAP virtual void setSaturation(float saturation) = 0;\n\n    CV_WRAP virtual float getBias() const = 0;\n    CV_WRAP virtual void setBias(float bias) = 0;\n};\n\n/** @brief Creates TonemapDrago object\n\n@param gamma gamma value for gamma correction. See createTonemap\n@param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater\nthan 1 increase saturation and values less than 1 decrease it.\n@param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give best\nresults, default value is 0.85.\n */\nCV_EXPORTS_W Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f);\n\n/** @brief This algorithm decomposes the image into two layers: a base layer and a detail layer using a bilateral filter,\nand compresses contrast of the base layer, thus preserving all the details.\n\nThis implementation uses the regular bilateral filter from OpenCV.\n\nSaturation enhancement is possible as in TonemapDrago.\n\nFor more information see @cite DD02 .\n */\nclass CV_EXPORTS_W TonemapDurand : public Tonemap\n{\npublic:\n\n    CV_WRAP virtual float getSaturation() const = 0;\n    CV_WRAP virtual void setSaturation(float saturation) = 0;\n\n    CV_WRAP virtual float getContrast() const = 0;\n    CV_WRAP virtual void setContrast(float contrast) = 0;\n\n    CV_WRAP virtual float getSigmaSpace() const = 0;\n    CV_WRAP virtual void setSigmaSpace(float sigma_space) = 0;\n\n    CV_WRAP virtual float getSigmaColor() const = 0;\n    CV_WRAP virtual void setSigmaColor(float sigma_color) = 0;\n};\n\n/** @brief Creates TonemapDurand object\n\n@param gamma gamma value for gamma correction. See createTonemap\n@param contrast resulting contrast on logarithmic scale, i. e.
log(max / min), where max and min\nare maximum and minimum luminance values of the resulting image.\n@param saturation saturation enhancement value. See createTonemapDrago\n@param sigma_space bilateral filter sigma in coordinate space\n@param sigma_color bilateral filter sigma in color space\n */\nCV_EXPORTS_W Ptr<TonemapDurand>\ncreateTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_space = 2.0f, float sigma_color = 2.0f);\n\n/** @brief This is a global tonemapping operator that models the human visual system.\n\nThe mapping function is controlled by an adaptation parameter that is computed using light adaptation and\ncolor adaptation.\n\nFor more information see @cite RD05 .\n */\nclass CV_EXPORTS_W TonemapReinhard : public Tonemap\n{\npublic:\n    CV_WRAP virtual float getIntensity() const = 0;\n    CV_WRAP virtual void setIntensity(float intensity) = 0;\n\n    CV_WRAP virtual float getLightAdaptation() const = 0;\n    CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0;\n\n    CV_WRAP virtual float getColorAdaptation() const = 0;\n    CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;\n};\n\n/** @brief Creates TonemapReinhard object\n\n@param gamma gamma value for gamma correction. See createTonemap\n@param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.\n@param light_adapt light adaptation in [0, 1] range. If 1, adaptation is based only on the pixel\nvalue; if 0, it's global; otherwise it's a weighted mean of these two cases.\n@param color_adapt chromatic adaptation in [0, 1] range. If 1, channels are treated independently;\nif 0, the adaptation level is the same for each channel.\n */\nCV_EXPORTS_W Ptr<TonemapReinhard>\ncreateTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f);\n\n/** @brief This algorithm transforms the image to contrast using gradients on all levels of a Gaussian pyramid,\ntransforms contrast values to HVS response and scales the response. After this the image is\nreconstructed from the new contrast values.\n\nFor more information see @cite MM06 .\n */\nclass CV_EXPORTS_W TonemapMantiuk : public Tonemap\n{\npublic:\n    CV_WRAP virtual float getScale() const = 0;\n    CV_WRAP virtual void setScale(float scale) = 0;\n\n    CV_WRAP virtual float getSaturation() const = 0;\n    CV_WRAP virtual void setSaturation(float saturation) = 0;\n};\n\n/** @brief Creates TonemapMantiuk object\n\n@param gamma gamma value for gamma correction. See createTonemap\n@param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing\ndynamic range. Values from 0.6 to 0.9 produce best results.\n@param saturation saturation enhancement value.
See createTonemapDrago\n */\nCV_EXPORTS_W Ptr<TonemapMantiuk>\ncreateTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f);\n\n/** @brief The base class for algorithms that align images of the same scene with different exposures\n */\nclass CV_EXPORTS_W AlignExposures : public Algorithm\n{\npublic:\n    /** @brief Aligns images\n\n    @param src vector of input images\n    @param dst vector of aligned images\n    @param times vector of exposure time values for each image\n    @param response 256x1 matrix with inverse camera response function for each pixel value, it should\n    have the same number of channels as images.\n     */\n    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,\n                                 InputArray times, InputArray response) = 0;\n};\n\n/** @brief This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median\nluminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.\n\nIt is invariant to exposure, so exposure values and camera response are not necessary.\n\nIn this implementation new image regions are filled with zeros.\n\nFor more information see @cite GW03 .\n */\nclass CV_EXPORTS_W AlignMTB : public AlignExposures\n{\npublic:\n    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,\n                                 InputArray times, InputArray response) = 0;\n\n    /** @brief Short version of process that doesn't take extra arguments.\n\n    @param src vector of input images\n    @param dst vector of aligned images\n     */\n    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0;\n\n    /** @brief Calculates shift between two images, i. e. how to shift the second image to make it correspond with the\n    first.\n\n    @param img0 first image\n    @param img1 second image\n     */\n    CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0;\n    /** @brief Helper function that shifts a Mat, filling new regions with zeros.\n\n    @param src input image\n    @param dst result image\n    @param shift shift value\n     */\n    CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0;\n    /** @brief Computes median threshold and exclude bitmaps of the given image.\n\n    @param img input image\n    @param tb median threshold bitmap\n    @param eb exclude bitmap\n     */\n    CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0;\n\n    CV_WRAP virtual int getMaxBits() const = 0;\n    CV_WRAP virtual void setMaxBits(int max_bits) = 0;\n\n    CV_WRAP virtual int getExcludeRange() const = 0;\n    CV_WRAP virtual void setExcludeRange(int exclude_range) = 0;\n\n    CV_WRAP virtual bool getCut() const = 0;\n    CV_WRAP virtual void setCut(bool value) = 0;\n};\n\n/** @brief Creates AlignMTB object\n\n@param max_bits logarithm to the base 2 of maximal shift in each dimension.
Values of 5 and 6 are\nusually good enough (31 and 63 pixels shift respectively).\n@param exclude_range range for exclusion bitmap that is constructed to suppress noise around the\nmedian value.\n@param cut if true, cuts images; otherwise fills the new regions with zeros.\n */\nCV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true);\n\n/** @brief The base class for camera response calibration algorithms.\n */\nclass CV_EXPORTS_W CalibrateCRF : public Algorithm\n{\npublic:\n    /** @brief Recovers inverse camera response.\n\n    @param src vector of input images\n    @param dst 256x1 matrix with inverse camera response function\n    @param times vector of exposure time values for each image\n     */\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;\n};\n\n/** @brief The inverse camera response function is extracted for each brightness value by minimizing an objective\nfunction as a linear system. The objective function is constructed using pixel values at the same position\nin all images; an extra term is added to make the result smoother.\n\nFor more information see @cite DM97 .\n */\nclass CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF\n{\npublic:\n    CV_WRAP virtual float getLambda() const = 0;\n    CV_WRAP virtual void setLambda(float lambda) = 0;\n\n    CV_WRAP virtual int getSamples() const = 0;\n    CV_WRAP virtual void setSamples(int samples) = 0;\n\n    CV_WRAP virtual bool getRandom() const = 0;\n    CV_WRAP virtual void setRandom(bool random) = 0;\n};\n\n/** @brief Creates CalibrateDebevec object\n\n@param samples number of pixel locations to use\n@param lambda smoothness term weight. Greater values produce smoother results, but can alter the\nresponse.\n@param random if true, sample pixel locations are chosen at random; otherwise they form a\nrectangular grid.\n */\nCV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false);\n\n/** @brief The inverse camera response function is extracted for each brightness value by minimizing an objective\nfunction as a linear system.
This algorithm uses all image pixels.\n\nFor more information see @cite RB99 .\n */\nclass CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF\n{\npublic:\n    CV_WRAP virtual int getMaxIter() const = 0;\n    CV_WRAP virtual void setMaxIter(int max_iter) = 0;\n\n    CV_WRAP virtual float getThreshold() const = 0;\n    CV_WRAP virtual void setThreshold(float threshold) = 0;\n\n    CV_WRAP virtual Mat getRadiance() const = 0;\n};\n\n/** @brief Creates CalibrateRobertson object\n\n@param max_iter maximal number of Gauss-Seidel solver iterations.\n@param threshold target difference between results of two successive steps of the minimization.\n */\nCV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);\n\n/** @brief The base class for algorithms that can merge an exposure sequence into a single image.\n */\nclass CV_EXPORTS_W MergeExposures : public Algorithm\n{\npublic:\n    /** @brief Merges images.\n\n    @param src vector of input images\n    @param dst result image\n    @param times vector of exposure time values for each image\n    @param response 256x1 matrix with inverse camera response function for each pixel value, it should\n    have the same number of channels as images.\n     */\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,\n                                 InputArray times, InputArray response) = 0;\n};\n\n/** @brief The resulting HDR image is calculated as a weighted average of the exposures considering exposure\nvalues and camera response.\n\nFor more information see @cite DM97 .\n */\nclass CV_EXPORTS_W MergeDebevec : public MergeExposures\n{\npublic:\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,\n                                 InputArray times, InputArray response) = 0;\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;\n};\n\n/** @brief Creates MergeDebevec object\n */\nCV_EXPORTS_W Ptr<MergeDebevec> createMergeDebevec();\n\n/** @brief Pixels are weighted using contrast, saturation and well-exposedness measures, then images are\ncombined using Laplacian pyramids.\n\nThe resulting image weight is constructed as a weighted average of contrast, saturation and\nwell-exposedness measures.\n\nThe resulting image doesn't require tonemapping and can be converted to an 8-bit image by multiplying\nby 255, but it's recommended to apply gamma correction and/or linear tonemapping.\n\nFor more information see @cite MK07 .\n */\nclass CV_EXPORTS_W MergeMertens : public MergeExposures\n{\npublic:\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,\n                                 InputArray times, InputArray response) = 0;\n    /** @brief Short version of process that doesn't take extra arguments.\n\n    @param src vector of input images\n    @param dst result image\n     */\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0;\n\n    CV_WRAP virtual float getContrastWeight() const = 0;\n    CV_WRAP virtual void setContrastWeight(float contrast_weight) = 0;\n\n    CV_WRAP virtual float getSaturationWeight() const = 0;\n    CV_WRAP virtual void setSaturationWeight(float saturation_weight) = 0;\n\n    CV_WRAP virtual float getExposureWeight() const = 0;\n    CV_WRAP virtual void setExposureWeight(float exposure_weight) = 0;\n};\n\n/** @brief Creates MergeMertens object\n\n@param contrast_weight contrast measure weight.
See MergeMertens.\n@param saturation_weight saturation measure weight\n@param exposure_weight well-exposedness measure weight\n */\nCV_EXPORTS_W Ptr<MergeMertens>\ncreateMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f);\n\n/** @brief The resulting HDR image is calculated as a weighted average of the exposures considering exposure\nvalues and camera response.\n\nFor more information see @cite RB99 .\n */\nclass CV_EXPORTS_W MergeRobertson : public MergeExposures\n{\npublic:\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,\n                                 InputArray times, InputArray response) = 0;\n    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;\n};\n\n/** @brief Creates MergeRobertson object\n */\nCV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();\n\n//! @} photo_hdr\n\n/** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized\nblack-and-white photograph rendering, and in many single channel image processing applications\n@cite CL12 .\n\n@param src Input 8-bit 3-channel image.\n@param grayscale Output 8-bit 1-channel image.\n@param color_boost Output 8-bit 3-channel image.\n\nThis function is to be applied to color images.\n */\nCV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost);\n\n//! @addtogroup photo_clone\n//! @{\n\n/** @brief Image editing tasks concern either global changes (color/intensity corrections, filters,\ndeformations) or local changes confined to a selection. Here we are interested in achieving local\nchanges, ones that are restricted to a region manually selected (ROI), in a seamless and effortless\nmanner. The extent of the changes ranges from slight distortions to complete replacement by novel\ncontent @cite PM03 .\n\n@param src Input 8-bit 3-channel image.\n@param dst Input 8-bit 3-channel image.\n@param mask Input 8-bit 1 or 3-channel image.\n@param p Point in dst image where the object is placed.\n@param blend Output image with the same size and type as dst.\n@param flags Cloning method that could be one of the following:\n-   **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with\ncomplex outlines into a new background\n-   **MIXED_CLONE** The classic method, color-based selection and alpha masking might be time\nconsuming and often leaves an undesirable halo. Seamless cloning, even averaged with the\noriginal image, is not effective.
Mixed seamless cloning based on a loose selection proves\neffective.\n-   **MONOCHROME_TRANSFER** Monochrome transfer allows the user to easily replace certain features of\none object by alternative features.\n */\nCV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,\n        OutputArray blend, int flags);\n\n/** @brief Given an original color image, two differently colored versions of this image can be mixed\nseamlessly.\n\n@param src Input 8-bit 3-channel image.\n@param mask Input 8-bit 1 or 3-channel image.\n@param dst Output image with the same size and type as src .\n@param red_mul R-channel multiply factor.\n@param green_mul G-channel multiply factor.\n@param blue_mul B-channel multiply factor.\n\nMultiplication factors are between 0.5 and 2.5.\n */\nCV_EXPORTS_W void colorChange(InputArray src, InputArray mask, OutputArray dst, float red_mul = 1.0f,\n        float green_mul = 1.0f, float blue_mul = 1.0f);\n\n/** @brief Applying an appropriate non-linear transformation to the gradient field inside the selection and\nthen integrating back with a Poisson solver locally modifies the apparent illumination of an image.\n\n@param src Input 8-bit 3-channel image.\n@param mask Input 8-bit 1 or 3-channel image.\n@param dst Output image with the same size and type as src.\n@param alpha Value ranges between 0-2.\n@param beta Value ranges between 0-2.\n\nThis is useful to highlight under-exposed foreground objects or to reduce specular reflections.\n */\nCV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArray dst,\n        float alpha = 0.2f, float beta = 0.4f);\n\n/** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one\nwashes out the texture of the selected region, giving its contents a flat aspect. Here the Canny edge\ndetector is used.\n\n@param src Input 8-bit 3-channel image.\n@param mask Input 8-bit 1 or 3-channel image.\n@param dst Output image with the same size and type as src.\n@param low_threshold Range from 0 to 100.\n@param high_threshold Value \\> 100.\n@param kernel_size The size of the Sobel kernel to be used.\n\n**NOTE:**\n\nThe algorithm assumes that the color of the source image is close to that of the destination. This\nassumption means that when the colors don't match, the source image color gets tinted toward the\ncolor of the destination image.\n */\nCV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray dst,\n        float low_threshold = 30, float high_threshold = 45,\n        int kernel_size = 3);\n\n//! @} photo_clone\n\n//! @addtogroup photo_render\n//! @{\n\n/** @brief Filtering is the fundamental operation in image and video processing.
Edge-preserving smoothing\nfilters are used in many different applications @cite EM11 .\n\n@param src Input 8-bit 3-channel image.\n@param dst Output 8-bit 3-channel image.\n@param flags Edge preserving filters:\n-   **RECURS_FILTER** = 1\n-   **NORMCONV_FILTER** = 2\n@param sigma_s Range between 0 and 200.\n@param sigma_r Range between 0 and 1.\n */\nCV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,\n        float sigma_s = 60, float sigma_r = 0.4f);\n\n/** @brief This filter enhances the details of a particular image.\n\n@param src Input 8-bit 3-channel image.\n@param dst Output image with the same size and type as src.\n@param sigma_s Range between 0 and 200.\n@param sigma_r Range between 0 and 1.\n */\nCV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,\n        float sigma_r = 0.15f);\n\n/** @brief Pencil-like non-photorealistic line drawing\n\n@param src Input 8-bit 3-channel image.\n@param dst1 Output 8-bit 1-channel image.\n@param dst2 Output image with the same size and type as src.\n@param sigma_s Range between 0 and 200.\n@param sigma_r Range between 0 and 1.\n@param shade_factor Range between 0 and 0.1.\n */\nCV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,\n        float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);\n\n/** @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on\nphotorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low\ncontrast while preserving, or enhancing, high-contrast features.\n\n@param src Input 8-bit 3-channel image.\n@param dst Output image with the same size and type as src.\n@param sigma_s Range between 0 and 200.\n@param sigma_r Range between 0 and 1.\n */\nCV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,\n        float sigma_r = 0.45f);\n\n//! @} photo_render\n\n//! @} photo\n\n} // cv\n\n#ifndef DISABLE_OPENCV_24_COMPATIBILITY\n#include \"opencv2/photo/photo_c.h\"\n#endif\n\n#endif\n"
  },
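  {
    "path": "docs/examples/photo_denoise_example.cpp",
    "content": "// Hypothetical usage sketch, not part of OpenCV: exercises the non-local means denoising\n// functions declared in photo.hpp. Assumes OpenCV 3.x with the imgcodecs module; file names are\n// placeholders.\n#include <opencv2/photo.hpp>\n#include <opencv2/imgcodecs.hpp>\n\nint main()\n{\n    cv::Mat noisy = cv::imread(\"noisy.png\");   // 8-bit 3-channel input\n    if (noisy.empty()) return 1;\n\n    // h regulates luminance filtering strength and hColor the chroma strength; the recommended\n    // template/search window sizes (7 and 21) are also the defaults.\n    cv::Mat denoised;\n    cv::fastNlMeansDenoisingColored(noisy, denoised, 10.0f, 10.0f, 7, 21);\n\n    cv::imwrite(\"denoised.png\", denoised);\n    return 0;\n}\n"
  },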
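  {
    "path": "docs/examples/photo_hdr_example.cpp",
    "content": "// Hypothetical usage sketch, not part of OpenCV: chains the align -> calibrate -> merge ->\n// tonemap classes declared in photo.hpp into the usual HDR pipeline. File names and exposure\n// times are placeholders; assumes OpenCV 3.x with the imgcodecs module.\n#include <opencv2/photo.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <vector>\n\nint main()\n{\n    std::vector<cv::Mat> images;\n    images.push_back(cv::imread(\"exposure_short.png\"));\n    images.push_back(cv::imread(\"exposure_mid.png\"));\n    images.push_back(cv::imread(\"exposure_long.png\"));\n    std::vector<float> times;\n    times.push_back(1.0f / 30.0f);  // exposure times in seconds\n    times.push_back(0.25f);\n    times.push_back(2.0f);\n\n    // Align the handheld shots with median threshold bitmaps (invariant to exposure).\n    cv::createAlignMTB()->process(images, images);\n\n    // Recover the inverse camera response function from the exposure series.\n    cv::Mat response;\n    cv::createCalibrateDebevec()->process(images, response, times);\n\n    // Merge the exposures into a single 32-bit floating point HDR radiance map.\n    cv::Mat hdr;\n    cv::createMergeDebevec()->process(images, hdr, times, response);\n\n    // Compress the range to [0, 1] for display and store as an 8-bit image.\n    cv::Mat ldr;\n    cv::createTonemapDurand(2.2f)->process(hdr, ldr);\n    cv::imwrite(\"ldr.png\", ldr * 255);\n    return 0;\n}\n"
  },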
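  {
    "path": "docs/examples/photo_clone_example.cpp",
    "content": "// Hypothetical usage sketch, not part of OpenCV: exercises seamlessClone and one of the\n// non-photorealistic rendering filters declared in photo.hpp. File names are placeholders;\n// assumes OpenCV 3.x with the imgcodecs module.\n#include <opencv2/photo.hpp>\n#include <opencv2/imgcodecs.hpp>\n\nint main()\n{\n    cv::Mat src  = cv::imread(\"object.png\");      // object to insert\n    cv::Mat dst  = cv::imread(\"background.png\");  // destination scene\n    cv::Mat mask = cv::imread(\"object_mask.png\"); // white where the object is\n    if (src.empty() || dst.empty() || mask.empty()) return 1;\n\n    // Paste the masked object centered at p, blending gradients with the Poisson solver.\n    cv::Point p(dst.cols / 2, dst.rows / 2);\n    cv::Mat blend;\n    cv::seamlessClone(src, dst, mask, p, blend, cv::NORMAL_CLONE);\n    cv::imwrite(\"blend.png\", blend);\n\n    // Cartoon-like abstraction of the result with an edge-aware filter.\n    cv::Mat stylized;\n    cv::stylization(blend, stylized, 60.0f, 0.45f);\n    cv::imwrite(\"stylized.png\", stylized);\n    return 0;\n}\n"
  },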
  {
    "path": "src/3rdparty/opencv/include/opencv2/plot.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n//################################################################################\n//\n//                    Created by Nuno Moutinho\n//\n//################################################################################\n\n#ifndef _OPENCV_PLOT_H_\n#define _OPENCV_PLOT_H_\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\n/**\n@defgroup plot Plot function for Mat data\n*/\n\nnamespace cv\n{\n    namespace plot\n    {\n        class CV_EXPORTS_W Plot2d : public Algorithm\n        {\n            public:\n\n            CV_WRAP virtual void setMinX(double _plotMinX) = 0;\n            CV_WRAP virtual void setMinY(double _plotMinY) = 0;\n            CV_WRAP virtual void setMaxX(double _plotMaxX) = 0;\n            CV_WRAP virtual void setMaxY(double _plotMaxY) = 0;\n            CV_WRAP virtual void setPlotLineWidth(int _plotLineWidth) = 0;\n            CV_WRAP virtual void setPlotLineColor(Scalar _plotLineColor) = 0;\n            CV_WRAP virtual void setPlotBackgroundColor(Scalar _plotBackgroundColor) = 0;\n            CV_WRAP virtual void setPlotAxisColor(Scalar _plotAxisColor) = 0;\n            CV_WRAP virtual void setPlotGridColor(Scalar _plotGridColor) = 0;\n            CV_WRAP virtual void setPlotTextColor(Scalar _plotTextColor) = 0;\n            CV_WRAP virtual void setPlotSize(int _plotSizeWidth, int 
_plotSizeHeight) = 0;\n            CV_WRAP virtual void render(Mat &_plotResult) = 0;\n        };\n\n        CV_EXPORTS_W Ptr<Plot2d> createPlot2d(Mat data);\n        CV_EXPORTS_W Ptr<Plot2d> createPlot2d(Mat dataX, Mat dataY);\n    }\n}\n\n#endif\n#endif\n"
  },
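  {
    "path": "docs/examples/plot_example.cpp",
    "content": "// Hypothetical usage sketch, not part of OpenCV: exercises the Plot2d interface declared in\n// plot.hpp. Assumes the data matrix is expected as doubles (CV_64F) and that the highgui module\n// is available for display.\n#include <opencv2/plot.hpp>\n#include <opencv2/highgui.hpp>\n#include <cmath>\n\nint main()\n{\n    // One row of y-values; the x-axis is implicit in the single-Mat overload of createPlot2d.\n    cv::Mat data(1, 100, CV_64F);\n    for (int i = 0; i < data.cols; i++)\n        data.at<double>(0, i) = std::sin(i * 0.1);\n\n    cv::Ptr<cv::plot::Plot2d> plot = cv::plot::createPlot2d(data);\n    plot->setPlotLineColor(cv::Scalar(0, 255, 0));  // green curve\n    plot->setPlotSize(600, 400);\n\n    cv::Mat display;\n    plot->render(display);  // draws the curve into an image\n    cv::imshow(\"plot\", display);\n    cv::waitKey(0);\n    return 0;\n}\n"
  },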
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/map.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAP_H_\n#define MAP_H_\n\n#include <opencv2/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)\n\n/** @defgroup reg Image Registration\n\nThe Registration module implements parametric image registration. The implemented method is direct\nalignment, that is, it uses directly the pixel values for calculating the registration between a\npair of images, as opposed to feature-based registration. The implementation follows essentially the\ncorresponding part of @cite Szeliski06 .\n\nFeature based methods have some advantages over pixel based methods when we are trying to register\npictures that have been shoot under different lighting conditions or exposition times, or when the\nimages overlap only partially. On the other hand, the main advantage of pixel-based methods when\ncompared to feature based methods is their better precision for some pictures (those shoot under\nsimilar lighting conditions and that have a significative overlap), due to the fact that we are\nusing all the information available in the image, which allows us to achieve subpixel accuracy. This\nis particularly important for certain applications like multi-frame denoising or super-resolution.\n\nIn fact, pixel and feature registration methods can complement each other: an application could\nfirst obtain a coarse registration using features and then refine the registration using a pixel\nbased method on the overlapping area of the images. 
The code developed allows this use case.\n\nThe module implements classes derived from the abstract classes cv::reg::Map or cv::reg::Mapper. The\nformer models a coordinate transformation between two reference frames, while the latter encapsulates\na way of invoking a method that calculates a Map between two images. Although the objective has been\nto implement pixel based methods, the module can be extended to support other methods that can\ncalculate transformations between images (feature methods, optical flow, etc.).\n\nEach class derived from Map implements a motion model, as follows:\n\n-   MapShift: Models a simple translation\n-   MapAffine: Models an affine transformation\n-   MapProjec: Models a projective transformation\n\nMapProjec can also be used to model affine motion or translations, but some operations on it are\nmore costly, and that is the reason for defining the other two classes.\n\nThe classes derived from Mapper are\n\n-   MapperGradShift: Gradient based alignment for calculating translations. It produces a MapShift\n    (two parameters that correspond to the shift vector).\n-   MapperGradEuclid: Gradient based alignment for Euclidean motions, that is, rotations and\n    translations. It calculates three parameters (angle and shift vector), although the result is\n    stored in a MapAffine object for convenience.\n-   MapperGradSimilar: Gradient based alignment for calculating similarities, which adds scaling to\n    the Euclidean motion. It calculates four parameters (two for the anti-symmetric matrix and two\n    for the shift vector), although the result is stored in a MapAffine object for\n    convenience.\n-   MapperGradAffine: Gradient based alignment for an affine motion model. The number of parameters\n    is six and the result is stored in a MapAffine object.\n-   MapperGradProj: Gradient based alignment for calculating projective transformations. The number\n    of parameters is eight and the result is stored in a MapProjec object.\n-   MapperPyramid: It implements hierarchical motion estimation using a Gaussian pyramid. Its\n    constructor accepts as argument any other object that implements the Mapper interface, and it is\n    that mapper that MapperPyramid calls for each scale of the pyramid.\n\nIf the motion between the images is not very small, the normal way of using these classes is to\ncreate a MapperGrad\\* object and use it as input to create a MapperPyramid, which in turn is called\nto perform the calculation. However, if the motion between the images is small enough, we can use\nthe MapperGrad\\* classes directly. Another possibility is to first use a feature based method to\nperform a coarse registration and then do a refinement through MapperPyramid or directly a\nMapperGrad\\* object. The \"calculate\" method of the mappers accepts an initial estimation of the\nmotion as input.\n\nWhen deciding which MapperGrad to use we must take into account that mappers with more parameters\ncan handle more complex motions, but involve more calculations and are therefore slower. Also, if we\nare confident in the motion model that is followed by the sequence, increasing the number of\nparameters beyond what we need will decrease the accuracy: it is better to use the smallest number of\ndegrees of freedom that we can.\n\nIn the module tests there are examples that show how to register a pair of images using any of the\nimplemented mappers.\n*/\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! 
@{\n\n/** @brief Base class for modelling a Map between two images.\n\nThe class is only used to define the common interface for any possible map.\n */\nclass CV_EXPORTS Map\n{\npublic:\n    /*!\n     * Virtual destructor\n     */\n    virtual ~Map(void);\n\n    /*!\n     * Warps image to a new coordinate frame. The calculation is img2(x)=img1(T^{-1}(x)), as we\n     * have to apply the inverse transformation to the points to move them to where the values\n     * of img2 are.\n     * \\param[in] img1 Original image\n     * \\param[out] img2 Warped image\n     */\n    virtual void warp(const cv::Mat& img1, cv::Mat& img2) const;\n\n    /*!\n     * Warps image to a new coordinate frame. The calculation is img2(x)=img1(T(x)), so in fact\n     * this is the inverse warping as we are taking the value of img1 with the forward\n     * transformation of the points.\n     * \\param[in] img1 Original image\n     * \\param[out] img2 Warped image\n     */\n    virtual void inverseWarp(const cv::Mat& img1, cv::Mat& img2) const = 0;\n\n    /*!\n     * Calculates the inverse map\n     * \\return Inverse map\n     */\n    virtual cv::Ptr<Map> inverseMap(void) const = 0;\n\n    /*!\n     * Changes the map by composing the current transformation with the one provided in the call.\n     * The order is first the current transformation, then the input argument.\n     * \\param[in] map Transformation to compose with.\n     */\n    virtual void compose(const Map& map) = 0;\n\n    /*!\n     * Scales the map by a given factor as if the coordinate system is expanded/compressed\n     * by that factor.\n     * \\param[in] factor Expansion if bigger than one, compression if smaller than one\n     */\n    virtual void scale(double factor) = 0;\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAP_H_\n"
  },
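  {
    "path": "docs/examples/reg_pyramid_example.cpp",
    "content": "// Hypothetical usage sketch, not part of OpenCV: the MapperGrad* + MapperPyramid flow described\n// in the module documentation of map.hpp. Assumes the reg module's mapshift.hpp,\n// mappergradshift.hpp and mapperpyramid.hpp headers and floating point (CV_64F) input images.\n#include <opencv2/reg/mapshift.hpp>\n#include <opencv2/reg/mappergradshift.hpp>\n#include <opencv2/reg/mapperpyramid.hpp>\n\n// Estimates the translation between two images of the same scene.\ncv::Vec<double, 2> estimateShift(const cv::Mat& img1, const cv::Mat& img2)\n{\n    // The gradient based mappers work on floating point images.\n    cv::Mat f1, f2;\n    img1.convertTo(f1, CV_64FC3);\n    img2.convertTo(f2, CV_64FC3);\n\n    // Hierarchical estimation: MapperPyramid invokes the base mapper at each pyramid scale.\n    cv::reg::MapperGradShift mapper;\n    cv::reg::MapperPyramid mapperPyr(mapper);\n    cv::Ptr<cv::reg::Map> mapPtr;\n    mapperPyr.calculate(f1, f2, mapPtr);\n\n    // MapperGradShift produces a MapShift holding the two shift parameters.\n    cv::reg::MapShift* mapShift = dynamic_cast<cv::reg::MapShift*>(mapPtr.get());\n    return mapShift->getShift();\n}\n"
  },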
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mapaffine.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPAFFINE_H_\n#define MAPAFFINE_H_\n\n#include \"map.hpp\"\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Defines an affine transformation\n */\nclass CV_EXPORTS MapAffine : public Map\n{\npublic:\n    /*!\n     * Default constructor builds an identity map\n     */\n    MapAffine(void);\n\n    /*!\n     * Constructor providing explicit values\n     * \\param[in] linTr Linear part of the affine transformation\n     * \\param[in] shift Displacement part of the affine transformation\n     */\n    MapAffine(const cv::Matx<double, 2, 2>& linTr, const cv::Vec<double, 2>& shift);\n\n    /*!\n     * Destructor\n     */\n    ~MapAffine(void);\n\n    void inverseWarp(const cv::Mat& img1, cv::Mat& img2) const;\n\n    cv::Ptr<Map> inverseMap(void) const;\n\n    void compose(const Map& map);\n\n    void scale(double factor);\n\n    /*!\n     * Return linear part of the affine transformation\n     * \\return Linear part of the affine transformation\n     */\n     const cv::Matx<double, 2, 2>& getLinTr() const {\n        return linTr_;\n    }\n\n    /*!\n     * Return displacement part of the affine transformation\n     * \\return Displacement part of the affine transformation\n     */\n    const cv::Vec<double, 2>& getShift() const {\n        return shift_;\n    }\n\nprivate:\n    cv::Matx<double, 2, 2> linTr_;\n    cv::Vec<double, 2> shift_;\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPAFFINE_H_\n"
  },
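  {
    "path": "docs/examples/reg_mapaffine_example.cpp",
    "content": "// Hypothetical usage sketch, not part of OpenCV: builds a MapAffine (declared in mapaffine.hpp)\n// from an explicit rotation and shift and applies it to an image.\n#include <opencv2/reg/mapaffine.hpp>\n#include <cmath>\n\nint main()\n{\n    // Linear part: rotation by 5 degrees; displacement part: 10 px right, 5 px down.\n    const double a = 5.0 * CV_PI / 180.0;\n    cv::Matx<double, 2, 2> linTr(std::cos(a), -std::sin(a),\n                                 std::sin(a),  std::cos(a));\n    cv::Vec<double, 2> shift(10.0, 5.0);\n    cv::reg::MapAffine map(linTr, shift);\n\n    cv::Mat img1(200, 200, CV_64FC1, cv::Scalar(0));  // placeholder source image\n    cv::Mat img2;\n    map.warp(img1, img2);  // img2(x) = img1(T^{-1}(x))\n\n    // inverseMap() yields the transformation that undoes the warp.\n    cv::Ptr<cv::reg::Map> inv = map.inverseMap();\n    cv::Mat restored;\n    inv->warp(img2, restored);\n    return 0;\n}\n"
  },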
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mapper.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPER_H_\n#define MAPPER_H_\n\n#include <opencv2/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)\n#include \"map.hpp\"\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/** @brief Base class for modelling an algorithm for calculating a\n\nThe class is only used to define the common interface for any possible mapping algorithm.\n */\nclass CV_EXPORTS Mapper\n{\npublic:\n    virtual ~Mapper(void) {}\n\n    /*\n     * Calculate mapping between two images\n     * \\param[in] img1 Reference image\n     * \\param[in] img2 Warped image\n     * \\param[in,out] res Map from img1 to img2, stored in a smart pointer. 
If present as input,\n     *       it is an initial rough estimation that the mapper will try to refine.\n     */\n    virtual void calculate(const cv::Mat& img1, const cv::Mat& img2, cv::Ptr<Map>& res) const = 0;\n\n    /*\n     * Returns a map compatible with the Mapper class\n     * \\return Pointer to identity Map\n     */\n    virtual cv::Ptr<Map> getMap(void) const = 0;\n\nprotected:\n    /*\n     * Calculates gradient and difference between images\n     * \\param[in] img1 Image one\n     * \\param[in] img2 Image two\n     * \\param[out] Ix Gradient x-coordinate\n     * \\param[out] Iy Gradient y-coordinate\n     * \\param[out] It Difference of images\n     */\n    void gradient(const cv::Mat& img1, const cv::Mat& img2,\n                  cv::Mat& Ix, cv::Mat& Iy, cv::Mat& It) const;\n\n    /*\n     * Fills matrices with pixel coordinates of an image\n     * \\param[in] img Image\n     * \\param[out] grid_r Row (y-coordinate)\n     * \\param[out] grid_c Column (x-coordinate)\n     */\n    void grid(const Mat& img, Mat& grid_r, Mat& grid_c) const;\n\n    /*\n     * Per-element square of a matrix\n     * \\param[in] mat1 Input matrix\n     * \\return mat1[i,j]^2\n     */\n    cv::Mat sqr(const cv::Mat& mat1) const\n    {\n        cv::Mat res;\n        res.create(mat1.size(), mat1.type());\n        res = mat1.mul(mat1);\n        return res;\n    }\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPER_H_\n\n"
  },
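  {
    "path": "doc/reg_examples/mapper_calculate.cpp",
    "content": "// Hedged sketch of the Mapper::calculate() contract documented above; not\n// part of the original tree, and the path and synthetic data are illustrative.\n// Passing an empty Ptr<Map> asks the mapper to estimate the map from scratch;\n// a non-empty pointer is treated as an initial rough estimate to refine.\n// MapperGradShift and MapShift come from the sibling headers in this folder.\n#include <opencv2/core.hpp>\n#include <opencv2/reg/mappergradshift.hpp>\n#include <opencv2/reg/mapshift.hpp>\n#include <cmath>\n#include <iostream>\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::reg;\n\n    // Gradient-based mappers need texture, so build a smooth CV_64F pattern.\n    Mat img1(200, 200, CV_64FC1);\n    for (int r = 0; r < img1.rows; ++r)\n        for (int c = 0; c < img1.cols; ++c)\n            img1.at<double>(r, c) = std::sin(r / 10.0) * std::cos(c / 10.0);\n\n    // img2 is img1 displaced by a known small shift.\n    MapShift truth(Vec<double, 2>(3.0, -2.0));\n    Mat img2;\n    truth.inverseWarp(img1, img2);\n\n    MapperGradShift mapper;\n\n    // 1) Estimate from scratch: res starts empty.\n    Ptr<Map> res;\n    mapper.calculate(img1, img2, res);\n\n    // 2) Refine an initial rough estimate instead.\n    Ptr<Map> guess = makePtr<MapShift>(Vec<double, 2>(2.0, -1.0));\n    mapper.calculate(img1, img2, guess);\n\n    Ptr<MapShift> shiftMap = res.dynamicCast<MapShift>();\n    if (!shiftMap.empty())\n        std::cout << shiftMap->getShift() << std::endl;\n    return 0;\n}\n"
  },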
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mappergradaffine.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPERGRADAFFINE_H_\n#define MAPPERGRADAFFINE_H_\n\n#include \"mapper.hpp\"\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Mapper for affine motion\n */\nclass CV_EXPORTS MapperGradAffine: public Mapper\n{\npublic:\n    MapperGradAffine(void);\n    ~MapperGradAffine(void);\n\n    virtual void calculate(const cv::Mat& img1, const cv::Mat& img2, cv::Ptr<Map>& res) const;\n\n    cv::Ptr<Map> getMap(void) const;\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPERGRADAFFINE_H_\n"
  },
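  {
    "path": "doc/reg_examples/mappergradaffine_estimate.cpp",
    "content": "// Hedged sketch (not part of the original tree; path and data illustrative):\n// estimate an affine motion between two images with MapperGradAffine, then\n// read the parameters back through the MapAffine getters declared earlier.\n// The purely gradient-based mapper assumes the motion is small; larger\n// motions would be wrapped in a MapperPyramid (see mapperpyramid.hpp).\n#include <opencv2/core.hpp>\n#include <opencv2/reg/mappergradaffine.hpp>\n#include <opencv2/reg/mapaffine.hpp>\n#include <cmath>\n#include <iostream>\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::reg;\n\n    Mat img1(200, 200, CV_64FC1);\n    for (int r = 0; r < img1.rows; ++r)\n        for (int c = 0; c < img1.cols; ++c)\n            img1.at<double>(r, c) = std::sin(r / 10.0) * std::cos(c / 10.0);\n\n    // Ground truth: a slight rotation plus a small displacement.\n    const double t = 0.02;\n    MapAffine truth(Matx<double, 2, 2>(std::cos(t), -std::sin(t),\n                                       std::sin(t),  std::cos(t)),\n                    Vec<double, 2>(1.5, -1.0));\n    Mat img2;\n    truth.inverseWarp(img1, img2);\n\n    MapperGradAffine mapper;\n    Ptr<Map> res;\n    mapper.calculate(img1, img2, res);\n\n    Ptr<MapAffine> aff = res.dynamicCast<MapAffine>();\n    if (!aff.empty())\n    {\n        std::cout << Mat(aff->getLinTr()) << std::endl;\n        std::cout << aff->getShift() << std::endl;\n    }\n    return 0;\n}\n"
  },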
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mappergradeuclid.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPERGRADEUCLID_H_\n#define MAPPERGRADEUCLID_H_\n\n#include \"mapper.hpp\"\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Mapper for euclidean motion: rotation plus shift\n */\nclass CV_EXPORTS MapperGradEuclid: public Mapper\n{\npublic:\n    MapperGradEuclid(void);\n    ~MapperGradEuclid(void);\n\n    virtual void calculate(const cv::Mat& img1, const cv::Mat& img2, cv::Ptr<Map>& res) const;\n\n    cv::Ptr<Map> getMap(void) const;\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPERGRADEUCLID_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mappergradproj.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPERGRADPROJ_H_\n#define MAPPERGRADPROJ_H_\n\n#include \"mapper.hpp\"\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Gradient mapper for a projective transformation\n */\nclass CV_EXPORTS MapperGradProj: public Mapper\n{\npublic:\n    MapperGradProj(void);\n    ~MapperGradProj(void);\n\n    virtual void calculate(const cv::Mat& img1, const cv::Mat& img2, cv::Ptr<Map>& res) const;\n\n    cv::Ptr<Map> getMap(void) const;\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPERGRADPROJ_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mappergradshift.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPERGRADSHIFT_H_\n#define MAPPERGRADSHIFT_H_\n\n#include \"mapper.hpp\"\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Gradient mapper for a translation\n */\nclass CV_EXPORTS MapperGradShift: public Mapper\n{\npublic:\n    MapperGradShift(void);\n    virtual ~MapperGradShift(void);\n\n    virtual void calculate(const cv::Mat& img1, const cv::Mat& img2, cv::Ptr<Map>& res) const;\n\n    cv::Ptr<Map> getMap(void) const;\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPERGRADSHIFT_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mappergradsimilar.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPERGRADSIMILAR_H_\n#define MAPPERGRADSIMILAR_H_\n\n#include \"mapper.hpp\"\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Calculates a similarity transformation between to images (scale, rotation, and shift)\n */\nclass CV_EXPORTS MapperGradSimilar: public Mapper\n{\npublic:\n    MapperGradSimilar(void);\n    ~MapperGradSimilar(void);\n\n    virtual void calculate(const cv::Mat& img1, const cv::Mat& img2, cv::Ptr<Map>& res) const;\n\n    cv::Ptr<Map> getMap(void) const;\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPERGRADSIMILAR_H_\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mapperpyramid.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPERPYRAMID_H_\n#define MAPPERPYRAMID_H_\n\n#include \"mapper.hpp\"\n\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Calculates a map using a gaussian pyramid\n */\nclass CV_EXPORTS MapperPyramid: public Mapper\n{\npublic:\n    /*\n     * Constructor\n     * \\param[in] baseMapper Base mapper used for the refinements\n     */\n    MapperPyramid(const Mapper& baseMapper);\n\n    void calculate(const cv::Mat& img1, const cv::Mat& img2, cv::Ptr<Map>& res) const;\n\n    cv::Ptr<Map> getMap(void) const;\n\n    unsigned numLev_;           /*!< Number of levels of the pyramid */\n    unsigned numIterPerScale_;  /*!< Number of iterations at a given scale of the pyramid */\n\nprivate:\n    MapperPyramid& operator=(const MapperPyramid&);\n    const Mapper& baseMapper_;  /*!< Mapper used in inner level */\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPERPYRAMID_H_\n"
  },
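  {
    "path": "doc/reg_examples/mapperpyramid_tuning.cpp",
    "content": "// Hedged sketch (not part of the original tree; path and numbers are\n// illustrative): MapperPyramid repeats a base mapper over a Gaussian pyramid,\n// coarse to fine, which extends the small-motion gradient mappers to larger\n// displacements. numLev_ and numIterPerScale_ are public fields, so they can\n// be tuned after construction as shown here.\n#include <opencv2/core.hpp>\n#include <opencv2/reg/mapperpyramid.hpp>\n#include <opencv2/reg/mappergradshift.hpp>\n#include <opencv2/reg/mapshift.hpp>\n#include <cmath>\n#include <iostream>\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::reg;\n\n    Mat img1(256, 256, CV_64FC1);\n    for (int r = 0; r < img1.rows; ++r)\n        for (int c = 0; c < img1.cols; ++c)\n            img1.at<double>(r, c) = std::sin(r / 16.0) * std::cos(c / 16.0);\n\n    // A shift too large for the plain gradient mapper alone.\n    MapShift truth(Vec<double, 2>(15.0, -9.0));\n    Mat img2;\n    truth.inverseWarp(img1, img2);\n\n    MapperGradShift base;\n    MapperPyramid mapper(base);   // the base mapper is reused at every level\n    mapper.numLev_ = 4;           // number of pyramid levels\n    mapper.numIterPerScale_ = 6;  // refinement iterations per level\n\n    Ptr<Map> res;\n    mapper.calculate(img1, img2, res);\n\n    Ptr<MapShift> shiftMap = res.dynamicCast<MapShift>();\n    if (!shiftMap.empty())\n        std::cout << shiftMap->getShift() << std::endl;\n    return 0;\n}\n"
  },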
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mapprojec.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPPROJEC_H_\n#define MAPPROJEC_H_\n\n#include \"map.hpp\"\n\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Defines an transformation that consists on a projective transformation\n */\nclass CV_EXPORTS MapProjec : public Map\n{\npublic:\n    /*!\n     * Default constructor builds an identity map\n     */\n    MapProjec(void);\n\n    /*!\n     * Constructor providing explicit values\n     * \\param[in] projTr Projective transformation\n     */\n    MapProjec(const cv::Matx<double, 3, 3>& projTr);\n\n    /*!\n     * Destructor\n     */\n    ~MapProjec(void);\n\n    void inverseWarp(const cv::Mat& img1, cv::Mat& img2) const;\n\n    cv::Ptr<Map> inverseMap(void) const;\n\n    void compose(const Map& map);\n\n    void scale(double factor);\n\n    /*!\n     * Returns projection matrix\n     * \\return Projection matrix\n     */\n    const cv::Matx<double, 3, 3>& getProjTr() const {\n        return projTr_;\n    }\n\n    /*!\n     * Normalizes object's homography\n     */\n    void normalize(void) {\n        double z = 1./projTr_(2, 2);\n        for(size_t v_i = 0; v_i < sizeof(projTr_.val)/sizeof(projTr_.val[0]); ++v_i)\n            projTr_.val[v_i] *= z;\n    }\n\nprivate:\n    cv::Matx<double, 3, 3> projTr_;       /*< Projection matrix */\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPPROJEC_H_\n"
  },
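  {
    "path": "doc/reg_examples/mapprojec_normalize.cpp",
    "content": "// Hedged sketch (not part of the original tree; path and numbers are\n// illustrative). A homography is only defined up to scale, and the inline\n// normalize() above divides the whole matrix by its (2,2) entry so that entry\n// becomes 1. A matrix multiplied by any nonzero constant therefore describes\n// the same projective map, as this example demonstrates.\n#include <opencv2/core.hpp>\n#include <opencv2/reg/mapprojec.hpp>\n#include <iostream>\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::reg;\n\n    // An identity-with-translation homography, scaled by 2.\n    MapProjec map(Matx<double, 3, 3>(2.0, 0.0, 10.0,\n                                     0.0, 2.0, -4.0,\n                                     0.0, 0.0,  2.0));\n\n    map.normalize();\n\n    // Prints [1, 0, 5; 0, 1, -2; 0, 0, 1].\n    std::cout << Mat(map.getProjTr()) << std::endl;\n    return 0;\n}\n"
  },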
  {
    "path": "src/3rdparty/opencv/include/opencv2/reg/mapshift.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n// Copyright (C) 2013, Alfonso Sanchez-Beato, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef MAPSHIFT_H_\n#define MAPSHIFT_H_\n\n#include \"map.hpp\"\n\n\nnamespace cv {\nnamespace reg {\n\n//! @addtogroup reg\n//! @{\n\n/*!\n * Defines an transformation that consists on a simple displacement\n */\nclass CV_EXPORTS MapShift : public Map\n{\npublic:\n    /*!\n     * Default constructor builds an identity map\n     */\n    MapShift(void);\n\n    /*!\n     * Constructor providing explicit values\n     * \\param[in] shift Displacement\n     */\n    MapShift(const cv::Vec<double, 2>& shift);\n\n    /*!\n     * Destructor\n     */\n    ~MapShift(void);\n\n    void inverseWarp(const cv::Mat& img1, cv::Mat& img2) const;\n\n    cv::Ptr<Map> inverseMap(void) const;\n\n    void compose(const Map& map);\n\n    void scale(double factor);\n\n    /*!\n     * Return displacement\n     * \\return Displacement\n     */\n    const cv::Vec<double, 2>& getShift() const {\n        return shift_;\n    }\n\nprivate:\n    cv::Vec<double, 2> shift_;      /*< Displacement */\n};\n\n//! @}\n\n}}  // namespace cv::reg\n\n#endif  // MAPSHIFT_H_\n"
  },
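  {
    "path": "doc/reg_examples/mapshift_compose.cpp",
    "content": "// Hedged sketch (not part of the original tree; path and numbers are\n// illustrative): MapShift supports the generic Map operations declared above.\n// inverseMap() returns the opposite displacement and compose() chains another\n// map onto this one, so composing a shift with its own inverse is expected to\n// give the identity, i.e. a zero shift.\n#include <opencv2/core.hpp>\n#include <opencv2/reg/mapshift.hpp>\n#include <iostream>\n\nint main()\n{\n    using namespace cv;\n    using namespace cv::reg;\n\n    MapShift map(Vec<double, 2>(3.0, 4.0));\n\n    // The inverse of a (3, 4) shift is a (-3, -4) shift.\n    Ptr<Map> inv = map.inverseMap();\n\n    // Composing the two shifts cancels the displacement.\n    map.compose(*inv);\n\n    std::cout << map.getShift() << std::endl; // (0, 0)\n    return 0;\n}\n"
  },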
  {
    "path": "src/3rdparty/opencv/include/opencv2/rgbd/linemod.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_OBJDETECT_LINEMOD_HPP__\n#define __OPENCV_OBJDETECT_LINEMOD_HPP__\n\n#include \"opencv2/core.hpp\"\n#include <map>\n\n/****************************************************************************************\\\n*                                 LINE-MOD                                               *\n\\****************************************************************************************/\n\nnamespace cv {\nnamespace linemod {\n\n//! @addtogroup rgbd\n//! 
@{\n\n/**\n * \\brief Discriminant feature described by its location and label.\n */\nstruct CV_EXPORTS Feature\n{\n  int x; ///< x offset\n  int y; ///< y offset\n  int label; ///< Quantization\n\n  Feature() : x(0), y(0), label(0) {}\n  Feature(int x, int y, int label);\n\n  void read(const FileNode& fn);\n  void write(FileStorage& fs) const;\n};\n\ninline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {}\n\nstruct CV_EXPORTS Template\n{\n  int width;\n  int height;\n  int pyramid_level;\n  std::vector<Feature> features;\n\n  void read(const FileNode& fn);\n  void write(FileStorage& fs) const;\n};\n\n/**\n * \\brief Represents a modality operating over an image pyramid.\n */\nclass QuantizedPyramid\n{\npublic:\n  // Virtual destructor\n  virtual ~QuantizedPyramid() {}\n\n  /**\n   * \\brief Compute quantized image at current pyramid level for online detection.\n   *\n   * \\param[out] dst The destination 8-bit image. For each pixel at most one bit is set,\n   *                 representing its classification.\n   */\n  virtual void quantize(Mat& dst) const =0;\n\n  /**\n   * \\brief Extract most discriminant features at current pyramid level to form a new template.\n   *\n   * \\param[out] templ The new template.\n   */\n  virtual bool extractTemplate(Template& templ) const =0;\n\n  /**\n   * \\brief Go to the next pyramid level.\n   *\n   * \\todo Allow pyramid scale factor other than 2\n   */\n  virtual void pyrDown() =0;\n\nprotected:\n  /// Candidate feature with a score\n  struct Candidate\n  {\n    Candidate(int x, int y, int label, float score);\n\n    /// Sort candidates with high score to the front\n    bool operator<(const Candidate& rhs) const\n    {\n      return score > rhs.score;\n    }\n\n    Feature f;\n    float score;\n  };\n\n  /**\n   * \\brief Choose candidate features so that they are not bunched together.\n   *\n   * \\param[in]  candidates   Candidate features sorted by score.\n   * \\param[out] features     Destination vector of selected features.\n   * \\param[in]  num_features Number of candidates to select.\n   * \\param[in]  distance     Hint for desired distance between features.\n   */\n  static void selectScatteredFeatures(const std::vector<Candidate>& candidates,\n                                      std::vector<Feature>& features,\n                                      size_t num_features, float distance);\n};\n\ninline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {}\n\n/**\n * \\brief Interface for modalities that plug into the LINE template matching representation.\n *\n * \\todo Max response, to allow optimization of summing (255/MAX) features as uint8\n */\nclass CV_EXPORTS Modality\n{\npublic:\n  // Virtual destructor\n  virtual ~Modality() {}\n\n  /**\n   * \\brief Form a quantized image pyramid from a source image.\n   *\n   * \\param[in] src  The source image. Type depends on the modality.\n   * \\param[in] mask Optional mask. 
If not empty, unmasked pixels are set to zero\n   *                 in quantized image and cannot be extracted as features.\n   */\n  Ptr<QuantizedPyramid> process(const Mat& src,\n                    const Mat& mask = Mat()) const\n  {\n    return processImpl(src, mask);\n  }\n\n  virtual String name() const =0;\n\n  virtual void read(const FileNode& fn) =0;\n  virtual void write(FileStorage& fs) const =0;\n\n  /**\n   * \\brief Create modality by name.\n   *\n   * The following modality types are supported:\n   * - \"ColorGradient\"\n   * - \"DepthNormal\"\n   */\n  static Ptr<Modality> create(const String& modality_type);\n\n  /**\n   * \\brief Load a modality from file.\n   */\n  static Ptr<Modality> create(const FileNode& fn);\n\nprotected:\n  // Indirection is because process() has a default parameter.\n  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,\n                        const Mat& mask) const =0;\n};\n\n/**\n * \\brief Modality that computes quantized gradient orientations from a color image.\n */\nclass CV_EXPORTS ColorGradient : public Modality\n{\npublic:\n  /**\n   * \\brief Default constructor. Uses reasonable default parameter values.\n   */\n  ColorGradient();\n\n  /**\n   * \\brief Constructor.\n   *\n   * \\param weak_threshold   When quantizing, discard gradients with magnitude less than this.\n   * \\param num_features     How many features a template must contain.\n   * \\param strong_threshold Consider as candidate features only gradients whose norms are\n   *                         larger than this.\n   */\n  ColorGradient(float weak_threshold, size_t num_features, float strong_threshold);\n\n  virtual String name() const;\n\n  virtual void read(const FileNode& fn);\n  virtual void write(FileStorage& fs) const;\n\n  float weak_threshold;\n  size_t num_features;\n  float strong_threshold;\n\nprotected:\n  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,\n                        const Mat& mask) const;\n};\n\n/**\n * \\brief Modality that computes quantized surface normals from a dense depth map.\n */\nclass CV_EXPORTS DepthNormal : public Modality\n{\npublic:\n  /**\n   * \\brief Default constructor. 
Uses reasonable default parameter values.\n   */\n  DepthNormal();\n\n  /**\n   * \\brief Constructor.\n   *\n   * \\param distance_threshold   Ignore pixels beyond this distance.\n   * \\param difference_threshold When computing normals, ignore contributions of pixels whose\n   *                             depth difference with the central pixel is above this threshold.\n   * \\param num_features         How many features a template must contain.\n   * \\param extract_threshold    Consider as candidate feature only if there are no differing\n   *                             orientations within a distance of extract_threshold.\n   */\n  DepthNormal(int distance_threshold, int difference_threshold, size_t num_features,\n              int extract_threshold);\n\n  virtual String name() const;\n\n  virtual void read(const FileNode& fn);\n  virtual void write(FileStorage& fs) const;\n\n  int distance_threshold;\n  int difference_threshold;\n  size_t num_features;\n  int extract_threshold;\n\nprotected:\n  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,\n                        const Mat& mask) const;\n};\n\n/**\n * \\brief Debug function to colormap a quantized image for viewing.\n */\nvoid colormap(const Mat& quantized, Mat& dst);\n\n/**\n * \\brief Represents a successful template match.\n */\nstruct CV_EXPORTS Match\n{\n  Match()\n  {\n  }\n\n  Match(int x, int y, float similarity, const String& class_id, int template_id);\n\n  /// Sort matches with high similarity to the front\n  bool operator<(const Match& rhs) const\n  {\n    // Secondarily sort on template_id for the sake of duplicate removal\n    if (similarity != rhs.similarity)\n      return similarity > rhs.similarity;\n    else\n      return template_id < rhs.template_id;\n  }\n\n  bool operator==(const Match& rhs) const\n  {\n    return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id;\n  }\n\n  int x;\n  int y;\n  float similarity;\n  String class_id;\n  int template_id;\n};\n\ninline\nMatch::Match(int _x, int _y, float _similarity, const String& _class_id, int _template_id)\n    : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id)\n{}\n\n/**\n * \\brief Object detector using the LINE template matching algorithm with any set of\n * modalities.\n */\nclass CV_EXPORTS Detector\n{\npublic:\n  /**\n   * \\brief Empty constructor, initialize with read().\n   */\n  Detector();\n\n  /**\n   * \\brief Constructor.\n   *\n   * \\param modalities       Modalities to use (color gradients, depth normals, ...).\n   * \\param T_pyramid        Value of the sampling step T at each pyramid level. The\n   *                         number of pyramid levels is T_pyramid.size().\n   */\n  Detector(const std::vector< Ptr<Modality> >& modalities, const std::vector<int>& T_pyramid);\n\n  /**\n   * \\brief Detect objects by template matching.\n   *\n   * Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.\n   *\n   * \\param      sources   Source images, one for each modality.\n   * \\param      threshold Similarity threshold, a percentage between 0 and 100.\n   * \\param[out] matches   Template matches, sorted by similarity score.\n   * \\param      class_ids If non-empty, only search for the desired object classes.\n   * \\param[out] quantized_images Optionally return vector<Mat> of quantized images.\n   * \\param      masks     The masks for consideration during matching. 
The masks should be CV_8UC1\n   *                       where 255 represents a valid pixel.  If non-empty, the vector must be\n   *                       the same size as sources.  Each element must be\n   *                       empty or the same size as its corresponding source.\n   */\n  void match(const std::vector<Mat>& sources, float threshold, std::vector<Match>& matches,\n             const std::vector<String>& class_ids = std::vector<String>(),\n             OutputArrayOfArrays quantized_images = noArray(),\n             const std::vector<Mat>& masks = std::vector<Mat>()) const;\n\n  /**\n   * \\brief Add new object template.\n   *\n   * \\param      sources      Source images, one for each modality.\n   * \\param      class_id     Object class ID.\n   * \\param      object_mask  Mask separating object from background.\n   * \\param[out] bounding_box Optionally return bounding box of the extracted features.\n   *\n   * \\return Template ID, or -1 if failed to extract a valid template.\n   */\n  int addTemplate(const std::vector<Mat>& sources, const String& class_id,\n          const Mat& object_mask, Rect* bounding_box = NULL);\n\n  /**\n   * \\brief Add a new object template computed by external means.\n   */\n  int addSyntheticTemplate(const std::vector<Template>& templates, const String& class_id);\n\n  /**\n   * \\brief Get the modalities used by this detector.\n   *\n   * You are not permitted to add/remove modalities, but you may dynamic_cast them to\n   * tweak parameters.\n   */\n  const std::vector< Ptr<Modality> >& getModalities() const { return modalities; }\n\n  /**\n   * \\brief Get sampling step T at pyramid_level.\n   */\n  int getT(int pyramid_level) const { return T_at_level[pyramid_level]; }\n\n  /**\n   * \\brief Get number of pyramid levels used by this detector.\n   */\n  int pyramidLevels() const { return pyramid_levels; }\n\n  /**\n   * \\brief Get the template pyramid identified by template_id.\n   *\n   * For example, with 2 modalities (Gradient, Normal) and two pyramid levels\n   * (L0, L1), the order is (GradientL0, NormalL0, GradientL1, NormalL1).\n   */\n  const std::vector<Template>& getTemplates(const String& class_id, int template_id) const;\n\n  int numTemplates() const;\n  int numTemplates(const String& class_id) const;\n  int numClasses() const { return static_cast<int>(class_templates.size()); }\n\n  std::vector<String> classIds() const;\n\n  void read(const FileNode& fn);\n  void write(FileStorage& fs) const;\n\n  String readClass(const FileNode& fn, const String &class_id_override = \"\");\n  void writeClass(const String& class_id, FileStorage& fs) const;\n\n  void readClasses(const std::vector<String>& class_ids,\n                   const String& format = \"templates_%s.yml.gz\");\n  void writeClasses(const String& format = \"templates_%s.yml.gz\") const;\n\nprotected:\n  std::vector< Ptr<Modality> > modalities;\n  int pyramid_levels;\n  std::vector<int> T_at_level;\n\n  typedef std::vector<Template> TemplatePyramid;\n  typedef std::map<String, std::vector<TemplatePyramid> > TemplatesMap;\n  TemplatesMap class_templates;\n\n  typedef std::vector<Mat> LinearMemories;\n  // Indexed as [pyramid level][modality][quantized label]\n  typedef std::vector< std::vector<LinearMemories> > LinearMemoryPyramid;\n\n  void matchClass(const LinearMemoryPyramid& lm_pyramid,\n                  const std::vector<Size>& sizes,\n                  float threshold, std::vector<Match>& matches,\n                  const String& class_id,\n                  const 
std::vector<TemplatePyramid>& template_pyramids) const;\n};\n\n/**\n * \\brief Factory function for detector using LINE algorithm with color gradients.\n *\n * Default parameter settings suitable for VGA images.\n */\nCV_EXPORTS Ptr<Detector> getDefaultLINE();\n\n/**\n * \\brief Factory function for detector using LINE-MOD algorithm with color gradients\n * and depth normals.\n *\n * Default parameter settings suitable for VGA images.\n */\nCV_EXPORTS Ptr<Detector> getDefaultLINEMOD();\n\n//! @}\n\n} // namespace linemod\n} // namespace cv\n\n#endif // __OPENCV_OBJDETECT_LINEMOD_HPP__\n"
  },
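  {
    "path": "doc/linemod_examples/line_detector_roundtrip.cpp",
    "content": "// Hedged sketch of the Detector round trip declared above; this file is not\n// part of the original tree, and the path, the synthetic images and the 80%\n// threshold are illustrative. getDefaultLINE() uses a single ColorGradient\n// modality, so each call takes one BGR image per frame: add a masked template\n// of the object, then match against a scene image.\n#include <opencv2/core.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/rgbd/linemod.hpp>\n#include <iostream>\n\nint main()\n{\n    using namespace cv;\n\n    // A BGR object image with strong gradients for the modality to quantize.\n    Mat object(120, 120, CV_8UC3, Scalar::all(0));\n    rectangle(object, Rect(30, 30, 60, 60), Scalar(0, 255, 0), FILLED);\n    circle(object, Point(60, 60), 20, Scalar(255, 0, 0), FILLED);\n\n    // Mask separating the object from the background.\n    Mat mask(object.size(), CV_8UC1, Scalar(0));\n    rectangle(mask, Rect(20, 20, 80, 80), Scalar(255), FILLED);\n\n    Ptr<linemod::Detector> detector = linemod::getDefaultLINE();\n\n    std::vector<Mat> sources(1, object);\n    int template_id = detector->addTemplate(sources, \"toy\", mask);\n    if (template_id < 0)\n    {\n        std::cerr << \"failed to extract a valid template\" << std::endl;\n        return 1;\n    }\n\n    // Paste the object into a larger scene and search for it.\n    Mat scene(480, 640, CV_8UC3, Scalar::all(0));\n    object.copyTo(scene(Rect(200, 150, object.cols, object.rows)));\n\n    std::vector<linemod::Match> matches;\n    detector->match(std::vector<Mat>(1, scene), 80.0f, matches);\n\n    // Matches arrive sorted by similarity (a percentage in [0, 100]).\n    for (size_t i = 0; i < matches.size() && i < 3; ++i)\n        std::cout << matches[i].class_id << \" (\" << matches[i].x << \", \"\n                  << matches[i].y << \") \" << matches[i].similarity << std::endl;\n    return 0;\n}\n"
  },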
  {
    "path": "src/3rdparty/opencv/include/opencv2/rgbd.hpp",
    "content": "/*\n * Software License Agreement (BSD License)\n *\n *  Copyright (c) 2009, Willow Garage, Inc.\n *  All rights reserved.\n *\n *  Redistribution and use in source and binary forms, with or without\n *  modification, are permitted provided that the following conditions\n *  are met:\n *\n *   * Redistributions of source code must retain the above copyright\n *     notice, this list of conditions and the following disclaimer.\n *   * Redistributions in binary form must reproduce the above\n *     copyright notice, this list of conditions and the following\n *     disclaimer in the documentation and/or other materials provided\n *     with the distribution.\n *   * Neither the name of Willow Garage, Inc. nor the names of its\n *     contributors may be used to endorse or promote products derived\n *     from this software without specific prior written permission.\n *\n *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n *  \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n *  POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef __OPENCV_RGBD_HPP__\n#define __OPENCV_RGBD_HPP__\n\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n#include <limits>\n\n/** @defgroup rgbd RGB-Depth Processing\n*/\n\nnamespace cv\n{\nnamespace rgbd\n{\n\n//! @addtogroup rgbd\n//! @{\n\n  /** Checks if the value is a valid depth. For CV_16U or CV_16S, the convention is to be invalid if it is\n   * a limit. For a float/double, we just check if it is a NaN\n   * @param depth the depth to check for validity\n   */\n  CV_EXPORTS\n  inline bool\n  isValidDepth(const float & depth)\n  {\n    return !cvIsNaN(depth);\n  }\n  CV_EXPORTS\n  inline bool\n  isValidDepth(const double & depth)\n  {\n    return !cvIsNaN(depth);\n  }\n  CV_EXPORTS\n  inline bool\n  isValidDepth(const short int & depth)\n  {\n    return (depth != std::numeric_limits<short int>::min()) && (depth != std::numeric_limits<short int>::max());\n  }\n  CV_EXPORTS\n  inline bool\n  isValidDepth(const unsigned short int & depth)\n  {\n    return (depth != std::numeric_limits<unsigned short int>::min())\n        && (depth != std::numeric_limits<unsigned short int>::max());\n  }\n  CV_EXPORTS\n  inline bool\n  isValidDepth(const int & depth)\n  {\n    return (depth != std::numeric_limits<int>::min()) && (depth != std::numeric_limits<int>::max());\n  }\n  CV_EXPORTS\n  inline bool\n  isValidDepth(const unsigned int & depth)\n  {\n    return (depth != std::numeric_limits<unsigned int>::min()) && (depth != std::numeric_limits<unsigned int>::max());\n  }\n\n  /** Object that can compute the normals in an image.\n   * It is an object as it can cache data for speed efficiency\n   * The implemented methods are either:\n   * - FALS (the fastest) and SRI from\n   * ``Fast and Accurate Computation of Surface Normals from Range Images``\n   * by H. Badino, D. Huber, Y. Park and T. 
Kanade\n   * - the normals with bilateral filtering on a depth image from\n   * ``Gradient Response Maps for Real-Time Detection of Texture-Less Objects``\n   * by S. Hinterstoisser, C. Cagniart, S. Ilic, P. Sturm, N. Navab, P. Fua, and V. Lepetit\n   */\n  class CV_EXPORTS RgbdNormals: public Algorithm\n  {\n  public:\n    enum RGBD_NORMALS_METHOD\n    {\n      RGBD_NORMALS_METHOD_FALS, RGBD_NORMALS_METHOD_LINEMOD, RGBD_NORMALS_METHOD_SRI\n    };\n\n    RgbdNormals()\n        :\n          rows_(0),\n          cols_(0),\n          depth_(0),\n          K_(Mat()),\n          window_size_(0),\n          method_(RGBD_NORMALS_METHOD_FALS),\n          rgbd_normals_impl_(0)\n    {\n    }\n\n    /** Constructor\n     * @param rows the number of rows of the depth image normals will be computed on\n     * @param cols the number of cols of the depth image normals will be computed on\n     * @param depth the depth of the normals (only CV_32F or CV_64F)\n     * @param K the calibration matrix to use\n     * @param window_size the window size to compute the normals: can only be 1,3,5 or 7\n     * @param method one of the methods to use: RGBD_NORMALS_METHOD_FALS, RGBD_NORMALS_METHOD_LINEMOD or RGBD_NORMALS_METHOD_SRI\n     */\n    RgbdNormals(int rows, int cols, int depth, InputArray K, int window_size = 5, int method =\n        RGBD_NORMALS_METHOD_FALS);\n\n    ~RgbdNormals();\n\n    /** Given a set of 3d points in a depth image, compute the normals at each point.\n     * @param points a rows x cols x 3 matrix of CV_32F/CV_64F or a rows x cols x 1 CV_16U\n     * @param normals a rows x cols x 3 matrix\n     */\n    void\n    operator()(InputArray points, OutputArray normals) const;\n\n    /** Initializes some data that is cached for later computation\n     * If that function is not called, it will be called the first time normals are computed\n     */\n    void\n    initialize() const;\n\n    int getRows() const\n    {\n        return rows_;\n    }\n    void setRows(int val)\n    {\n        rows_ = val;\n    }\n    int getCols() const\n    {\n        return cols_;\n    }\n    void setCols(int val)\n    {\n        cols_ = val;\n    }\n    int getWindowSize() const\n    {\n        return window_size_;\n    }\n    void setWindowSize(int val)\n    {\n        window_size_ = val;\n    }\n    int getDepth() const\n    {\n        return depth_;\n    }\n    void setDepth(int val)\n    {\n        depth_ = val;\n    }\n    cv::Mat getK() const\n    {\n        return K_;\n    }\n    void setK(const cv::Mat &val)\n    {\n        K_ = val;\n    }\n    int getMethod() const\n    {\n        return method_;\n    }\n    void setMethod(int val)\n    {\n        method_ = val;\n    }\n\n  protected:\n    void\n    initialize_normals_impl(int rows, int cols, int depth, const Mat & K, int window_size, int method) const;\n\n    int rows_, cols_, depth_;\n    Mat K_;\n    int window_size_;\n    int method_;\n    mutable void* rgbd_normals_impl_;\n  };\n\n  /** Object that can clean a noisy depth image\n   */\n  class CV_EXPORTS DepthCleaner: public Algorithm\n  {\n  public:\n    /** NIL method is from\n     * ``Modeling Kinect Sensor Noise for Improved 3d Reconstruction and Tracking``\n     * by C. Nguyen, S. Izadi, D. 
Lovell\n     */\n    enum DEPTH_CLEANER_METHOD\n    {\n      DEPTH_CLEANER_NIL\n    };\n\n    DepthCleaner()\n        :\n          depth_(0),\n          window_size_(0),\n          method_(DEPTH_CLEANER_NIL),\n          depth_cleaner_impl_(0)\n    {\n    }\n\n    /** Constructor\n     * @param depth the depth of the image (only CV_32F or CV_64F)\n     * @param window_size the window size used to clean the depth: can only be 1,3,5 or 7\n     * @param method the method to use: DEPTH_CLEANER_NIL\n     */\n    DepthCleaner(int depth, int window_size = 5, int method = DEPTH_CLEANER_NIL);\n\n    ~DepthCleaner();\n\n    /** Given a set of 3d points in a depth image, compute the cleaned-up depth at each point.\n     * @param points a rows x cols x 3 matrix of CV_32F/CV_64F or a rows x cols x 1 CV_16U\n     * @param depth a rows x cols matrix of the cleaned up depth\n     */\n    void\n    operator()(InputArray points, OutputArray depth) const;\n\n    /** Initializes some data that is cached for later computation\n     * If that function is not called, it will be called the first time the depth is cleaned\n     */\n    void\n    initialize() const;\n\n    int getWindowSize() const\n    {\n        return window_size_;\n    }\n    void setWindowSize(int val)\n    {\n        window_size_ = val;\n    }\n    int getDepth() const\n    {\n        return depth_;\n    }\n    void setDepth(int val)\n    {\n        depth_ = val;\n    }\n    int getMethod() const\n    {\n        return method_;\n    }\n    void setMethod(int val)\n    {\n        method_ = val;\n    }\n\n  protected:\n    void\n    initialize_cleaner_impl() const;\n\n    int depth_;\n    int window_size_;\n    int method_;\n    mutable void* depth_cleaner_impl_;\n  };\n\n\n  /** Registers depth data to an external camera\n   * Registration is performed by creating a depth cloud, transforming the cloud by\n   * the rigid body transformation between the cameras, and then projecting the\n   * transformed points into the RGB camera.\n   *\n   * uv_rgb = K_rgb * [R | t] * z * inv(K_ir) * uv_ir\n   *\n   * Currently does not check for negative depth values.\n   *\n   * @param unregisteredCameraMatrix the camera matrix of the depth camera\n   * @param registeredCameraMatrix the camera matrix of the external camera\n   * @param registeredDistCoeffs the distortion coefficients of the external camera\n   * @param Rt the rigid body transform between the cameras. 
Transforms points from depth camera frame to external camera frame.\n   * @param unregisteredDepth the input depth data\n   * @param outputImagePlaneSize the image plane dimensions of the external camera (width, height)\n   * @param registeredDepth the result of transforming the depth into the external camera\n   * @param depthDilation whether or not the depth is dilated to avoid holes and occlusion errors (optional)\n   */\n  CV_EXPORTS\n  void\n  registerDepth(InputArray unregisteredCameraMatrix, InputArray registeredCameraMatrix, InputArray registeredDistCoeffs,\n                InputArray Rt, InputArray unregisteredDepth, const Size& outputImagePlaneSize,\n                OutputArray registeredDepth, bool depthDilation=false);\n\n  /**\n   * @param depth the depth image\n   * @param in_K the camera calibration matrix\n   * @param in_points the list of xy coordinates\n   * @param points3d the resulting 3d points\n   */\n  CV_EXPORTS\n  void\n  depthTo3dSparse(InputArray depth, InputArray in_K, InputArray in_points, OutputArray points3d);\n\n  /** Converts a depth image to an organized set of 3d points.\n   * The coordinate system is x pointing left, y down and z away from the camera\n   * @param depth the depth image (if given as short int CV_16U, it is assumed to be the depth in millimeters\n   *              (as done with the Microsoft Kinect), otherwise, if given as CV_32F or CV_64F, it is assumed in meters)\n   * @param K The calibration matrix\n   * @param points3d the resulting 3d points. They are of depth the same as `depth` if it is CV_32F or CV_64F, and the\n   *        depth of `K` if `depth` is of depth CV_16U\n   * @param mask the mask of the points to consider (can be empty)\n   */\n  CV_EXPORTS\n  void\n  depthTo3d(InputArray depth, InputArray K, OutputArray points3d, InputArray mask = noArray());\n\n  /** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided\n   * by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()\n   * Otherwise, the image is simply converted to floats\n   * @param in the depth image (if given as short int CV_16U, it is assumed to be the depth in millimeters\n   *              (as done with the Microsoft Kinect); otherwise it is assumed to be in meters)\n   * @param depth the desired output depth (float or double)\n   * @param out The rescaled float depth image\n   */\n  CV_EXPORTS\n  void\n  rescaleDepth(InputArray in, int depth, OutputArray out);\n\n  /** Object that can compute planes in an image\n   */\n  class CV_EXPORTS RgbdPlane: public Algorithm\n  {\n  public:\n    enum RGBD_PLANE_METHOD\n    {\n      RGBD_PLANE_METHOD_DEFAULT\n    };\n\n    RgbdPlane(RGBD_PLANE_METHOD method = RGBD_PLANE_METHOD_DEFAULT)\n        :\n          method_(method),\n          block_size_(40),\n          min_size_(block_size_*block_size_),\n          threshold_(0.01),\n          sensor_error_a_(0),\n          sensor_error_b_(0),\n          sensor_error_c_(0)\n    {\n    }\n\n    /** Find the planes in a depth image\n     * @param points3d the 3d points organized like the depth image: rows x cols with 3 channels\n     * @param normals the normals for every point in the depth image\n     * @param mask An image where each pixel is labeled with the plane it belongs to\n     *        and 255 if it does not belong to any plane\n     * @param plane_coefficients the coefficients of the corresponding planes (a,b,c,d) such that ax+by+cz+d=0, norm(a,b,c)=1\n     *        and c < 0 (so that the normal points towards the 
camera)\n     */\n    void\n    operator()(InputArray points3d, InputArray normals, OutputArray mask,\n               OutputArray plane_coefficients);\n\n    /** Find the planes in a depth image but without doing a normal check, which is faster but less accurate\n     * @param points3d the 3d points organized like the depth image: rows x cols with 3 channels\n     * @param mask An image where each pixel is labeled with the plane it belongs to\n     *        and 255 if it does not belong to any plane\n     * @param plane_coefficients the coefficients of the corresponding planes (a,b,c,d) such that ax+by+cz+d=0\n     */\n    void\n    operator()(InputArray points3d, OutputArray mask, OutputArray plane_coefficients);\n\n    int getBlockSize() const\n    {\n        return block_size_;\n    }\n    void setBlockSize(int val)\n    {\n        block_size_ = val;\n    }\n    int getMinSize() const\n    {\n        return min_size_;\n    }\n    void setMinSize(int val)\n    {\n        min_size_ = val;\n    }\n    int getMethod() const\n    {\n        return method_;\n    }\n    void setMethod(int val)\n    {\n        method_ = val;\n    }\n    double getThreshold() const\n    {\n        return threshold_;\n    }\n    void setThreshold(double val)\n    {\n        threshold_ = val;\n    }\n    double getSensorErrorA() const\n    {\n        return sensor_error_a_;\n    }\n    void setSensorErrorA(double val)\n    {\n        sensor_error_a_ = val;\n    }\n    double getSensorErrorB() const\n    {\n        return sensor_error_b_;\n    }\n    void setSensorErrorB(double val)\n    {\n        sensor_error_b_ = val;\n    }\n    double getSensorErrorC() const\n    {\n        return sensor_error_c_;\n    }\n    void setSensorErrorC(double val)\n    {\n        sensor_error_c_ = val;\n    }\n\n  private:\n    /** The method to use to compute the planes */\n    int method_;\n    /** The size of the blocks to look at for a stable MSE */\n    int block_size_;\n    /** The minimum size of a cluster to be considered a plane */\n    int min_size_;\n    /** How far a point can be from a plane to belong to it (in meters) */\n    double threshold_;\n    /** Coefficients of the sensor error model with respect to depth. All 0 by default, but you want a=0.0075 for a Kinect */\n    double sensor_error_a_, sensor_error_b_, sensor_error_c_;\n  };\n\n  /** Object that contains frame data.\n   */\n  struct CV_EXPORTS RgbdFrame\n  {\n      RgbdFrame();\n      RgbdFrame(const Mat& image, const Mat& depth, const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);\n      virtual ~RgbdFrame();\n\n      virtual void\n      release();\n\n      int ID;\n      Mat image;\n      Mat depth;\n      Mat mask;\n      Mat normals;\n  };\n\n  /** Object that contains frame data that is possibly needed for the Odometry.\n   * It's used for efficiency (to pass precomputed/cached data of the frame that participates\n   * in the Odometry processing several times).\n   */\n  struct CV_EXPORTS OdometryFrame : public RgbdFrame\n  {\n    /** These constants are used to set a type of cache which has to be prepared depending on the frame role:\n     * srcFrame or dstFrame (see compute method of the Odometry class). 
For the srcFrame and dstFrame different cache data may be required,\n     * some part of a cache may be common for both frame roles.\n     * @param CACHE_SRC The cache data for the srcFrame will be prepared.\n     * @param CACHE_DST The cache data for the dstFrame will be prepared.\n     * @param CACHE_ALL The cache data for both srcFrame and dstFrame roles will be computed.\n     */\n    enum\n    {\n      CACHE_SRC = 1, CACHE_DST = 2, CACHE_ALL = CACHE_SRC + CACHE_DST\n    };\n\n    OdometryFrame();\n    OdometryFrame(const Mat& image, const Mat& depth, const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);\n\n    virtual void\n    release();\n\n    void\n    releasePyramids();\n\n    std::vector<Mat> pyramidImage;\n    std::vector<Mat> pyramidDepth;\n    std::vector<Mat> pyramidMask;\n\n    std::vector<Mat> pyramidCloud;\n\n    std::vector<Mat> pyramid_dI_dx;\n    std::vector<Mat> pyramid_dI_dy;\n    std::vector<Mat> pyramidTexturedMask;\n\n    std::vector<Mat> pyramidNormals;\n    std::vector<Mat> pyramidNormalsMask;\n  };\n\n  /** Base class for computation of odometry.\n   */\n  class CV_EXPORTS Odometry: public Algorithm\n  {\n  public:\n\n    /** A class of transformation*/\n    enum\n    {\n      ROTATION = 1, TRANSLATION = 2, RIGID_BODY_MOTION = 4\n    };\n\n    static inline float\n    DEFAULT_MIN_DEPTH()\n    {\n      return 0.f; // in meters\n    }\n    static inline float\n    DEFAULT_MAX_DEPTH()\n    {\n      return 4.f; // in meters\n    }\n    static inline float\n    DEFAULT_MAX_DEPTH_DIFF()\n    {\n      return 0.07f; // in meters\n    }\n    static inline float\n    DEFAULT_MAX_POINTS_PART()\n    {\n      return 0.07f; // in [0, 1]\n    }\n    static inline float\n    DEFAULT_MAX_TRANSLATION()\n    {\n      return 0.15f; // in meters\n    }\n    static inline float\n    DEFAULT_MAX_ROTATION()\n    {\n      return 15; // in degrees\n    }\n\n    /** Method to compute a transformation from the source frame to the destination one.\n     * Some odometry algorithms do not use some of the frame data (e.g. ICP does not use images).\n     * In such cases the corresponding arguments can be set as empty Mat.\n     * The method returns true if all internal computations were possible (e.g. there were enough correspondences,\n     * the system of equations has a solution, etc.) and the resulting transformation satisfies some test if it's provided\n     * by the Odometry inheritor implementation (e.g. 
thresholds for maximum translation and rotation).\n     * @param srcImage Image data of the source frame (CV_8UC1)\n     * @param srcDepth Depth data of the source frame (CV_32FC1, in meters)\n     * @param srcMask Mask that sets which pixels have to be used from the source frame (CV_8UC1)\n     * @param dstImage Image data of the destination frame (CV_8UC1)\n     * @param dstDepth Depth data of the destination frame (CV_32FC1, in meters)\n     * @param dstMask Mask that sets which pixels have to be used from the destination frame (CV_8UC1)\n     * @param Rt Resulting transformation from the source frame to the destination one (rigid body motion):\n     dst_p = Rt * src_p, where dst_p is a homogeneous point in the destination frame and src_p is a\n     homogeneous point in the source frame;\n     Rt is a 4x4 matrix of CV_64FC1 type.\n     * @param initRt Initial transformation from the source frame to the destination one (optional)\n     */\n    bool\n    compute(const Mat& srcImage, const Mat& srcDepth, const Mat& srcMask, const Mat& dstImage, const Mat& dstDepth,\n            const Mat& dstMask, Mat& Rt, const Mat& initRt = Mat()) const;\n\n    /** Another method to compute a transformation from the source frame to the destination one.\n     * It is designed to save on computing the frame data (image pyramids, normals, etc.).\n     */\n    bool\n    compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFrame, Mat& Rt, const Mat& initRt = Mat()) const;\n\n    /** Prepare a cache for the frame. The function checks the precomputed/passed data (throwing an error if this data\n     * is not valid) and computes all remaining cache data needed for the frame. The returned size is the resolution\n     * of the prepared frame.\n     * @param frame The frame for which the cache will be prepared.\n     * @param cacheType The cache type: CACHE_SRC, CACHE_DST or CACHE_ALL.\n     */\n    virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;\n\n    static Ptr<Odometry> create(const String & odometryType);\n\n    /** @see setCameraMatrix */\n    virtual cv::Mat getCameraMatrix() const = 0;\n    /** @copybrief getCameraMatrix @see getCameraMatrix */\n    virtual void setCameraMatrix(const cv::Mat &val) = 0;\n    /** @see setTransformType */\n    virtual int getTransformType() const = 0;\n    /** @copybrief getTransformType @see getTransformType */\n    virtual void setTransformType(int val) = 0;\n\n  protected:\n    virtual void\n    checkParams() const = 0;\n\n    virtual bool\n    computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,\n                const Mat& initRt) const = 0;\n  };\n\n  /** Odometry based on the paper \"Real-Time Visual Odometry from Dense RGB-D Images\",\n   * F. Steinbrücker, J. Sturm, D. 
Cremers, ICCV, 2011.\n   */\n  class CV_EXPORTS RgbdOdometry: public Odometry\n  {\n  public:\n    RgbdOdometry();\n    /** Constructor.\n     * @param cameraMatrix Camera matrix\n     * @param minDepth Pixels with depth less than minDepth will not be used (in meters)\n     * @param maxDepth Pixels with depth larger than maxDepth will not be used (in meters)\n     * @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out\n     *                     if their depth difference is larger than maxDepthDiff (in meters)\n     * @param iterCounts Count of iterations on each pyramid level.\n     * @param minGradientMagnitudes For each pyramid level the pixels will be filtered out\n     *                              if they have gradient magnitude less than minGradientMagnitudes[level].\n     * @param maxPointsPart The method uses a random pixels subset of size frameWidth x frameHeight x pointsPart\n     * @param transformType Class of transformation\n     */\n    RgbdOdometry(const Mat& cameraMatrix, float minDepth = DEFAULT_MIN_DEPTH(), float maxDepth = DEFAULT_MAX_DEPTH(),\n                 float maxDepthDiff = DEFAULT_MAX_DEPTH_DIFF(), const std::vector<int>& iterCounts = std::vector<int>(),\n                 const std::vector<float>& minGradientMagnitudes = std::vector<float>(), float maxPointsPart = DEFAULT_MAX_POINTS_PART(),\n                 int transformType = RIGID_BODY_MOTION);\n\n    virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;\n\n    cv::Mat getCameraMatrix() const\n    {\n        return cameraMatrix;\n    }\n    void setCameraMatrix(const cv::Mat &val)\n    {\n        cameraMatrix = val;\n    }\n    double getMinDepth() const\n    {\n        return minDepth;\n    }\n    void setMinDepth(double val)\n    {\n        minDepth = val;\n    }\n    double getMaxDepth() const\n    {\n        return maxDepth;\n    }\n    void setMaxDepth(double val)\n    {\n        maxDepth = val;\n    }\n    double getMaxDepthDiff() const\n    {\n        return maxDepthDiff;\n    }\n    void setMaxDepthDiff(double val)\n    {\n        maxDepthDiff = val;\n    }\n    cv::Mat getIterationCounts() const\n    {\n        return iterCounts;\n    }\n    void setIterationCounts(const cv::Mat &val)\n    {\n        iterCounts = val;\n    }\n    cv::Mat getMinGradientMagnitudes() const\n    {\n        return minGradientMagnitudes;\n    }\n    void setMinGradientMagnitudes(const cv::Mat &val)\n    {\n        minGradientMagnitudes = val;\n    }\n    double getMaxPointsPart() const\n    {\n        return maxPointsPart;\n    }\n    void setMaxPointsPart(double val)\n    {\n        maxPointsPart = val;\n    }\n    int getTransformType() const\n    {\n        return transformType;\n    }\n    void setTransformType(int val)\n    {\n        transformType = val;\n    }\n    double getMaxTranslation() const\n    {\n        return maxTranslation;\n    }\n    void setMaxTranslation(double val)\n    {\n        maxTranslation = val;\n    }\n    double getMaxRotation() const\n    {\n        return maxRotation;\n    }\n    void setMaxRotation(double val)\n    {\n        maxRotation = val;\n    }\n\n  protected:\n    virtual void\n    checkParams() const;\n\n    virtual bool\n    computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,\n                const Mat& initRt) const;\n\n    // Some params have commented desired type. 
This is because AlgorithmInfo::addParams does not support it yet.\n    /*float*/\n    double minDepth, maxDepth, maxDepthDiff;\n    /*vector<int>*/\n    Mat iterCounts;\n    /*vector<float>*/\n    Mat minGradientMagnitudes;\n    double maxPointsPart;\n\n    Mat cameraMatrix;\n    int transformType;\n\n    double maxTranslation, maxRotation;\n  };\n\n  /** Odometry based on the paper \"KinectFusion: Real-Time Dense Surface Mapping and Tracking\",\n   * Richard A. Newcombe, Andrew Fitzgibbon, et al., ISMAR, 2011.\n   */\n  class ICPOdometry: public Odometry\n  {\n  public:\n    ICPOdometry();\n    /** Constructor.\n     * @param cameraMatrix Camera matrix\n     * @param minDepth Pixels with depth less than minDepth will not be used\n     * @param maxDepth Pixels with depth larger than maxDepth will not be used\n     * @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out\n     *                     if their depth difference is larger than maxDepthDiff\n     * @param maxPointsPart The method uses a random subset of pixels of size frameWidth x frameHeight x pointsPart\n     * @param iterCounts Count of iterations on each pyramid level.\n     * @param transformType Class of transformation\n     */\n    ICPOdometry(const Mat& cameraMatrix, float minDepth = DEFAULT_MIN_DEPTH(), float maxDepth = DEFAULT_MAX_DEPTH(),\n                float maxDepthDiff = DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = DEFAULT_MAX_POINTS_PART(),\n                const std::vector<int>& iterCounts = std::vector<int>(), int transformType = RIGID_BODY_MOTION);\n\n    virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;\n\n    cv::Mat getCameraMatrix() const\n    {\n        return cameraMatrix;\n    }\n    void setCameraMatrix(const cv::Mat &val)\n    {\n        cameraMatrix = val;\n    }\n    double getMinDepth() const\n    {\n        return minDepth;\n    }\n    void setMinDepth(double val)\n    {\n        minDepth = val;\n    }\n    double getMaxDepth() const\n    {\n        return maxDepth;\n    }\n    void setMaxDepth(double val)\n    {\n        maxDepth = val;\n    }\n    double getMaxDepthDiff() const\n    {\n        return maxDepthDiff;\n    }\n    void setMaxDepthDiff(double val)\n    {\n        maxDepthDiff = val;\n    }\n    cv::Mat getIterationCounts() const\n    {\n        return iterCounts;\n    }\n    void setIterationCounts(const cv::Mat &val)\n    {\n        iterCounts = val;\n    }\n    double getMaxPointsPart() const\n    {\n        return maxPointsPart;\n    }\n    void setMaxPointsPart(double val)\n    {\n        maxPointsPart = val;\n    }\n    int getTransformType() const\n    {\n        return transformType;\n    }\n    void setTransformType(int val)\n    {\n        transformType = val;\n    }\n    double getMaxTranslation() const\n    {\n        return maxTranslation;\n    }\n    void setMaxTranslation(double val)\n    {\n        maxTranslation = val;\n    }\n    double getMaxRotation() const\n    {\n        return maxRotation;\n    }\n    void setMaxRotation(double val)\n    {\n        maxRotation = val;\n    }\n    Ptr<RgbdNormals> getNormalsComputer() const\n    {\n        return normalsComputer;\n    }\n\n  protected:\n    virtual void\n    checkParams() const;\n\n    virtual bool\n    computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,\n                const Mat& initRt) const;\n\n    // Some params have their desired type in a comment. 
This is because AlgorithmInfo::addParams does not support it yet.\n    /*float*/\n    double minDepth, maxDepth, maxDepthDiff;\n    /*float*/\n    double maxPointsPart;\n    /*vector<int>*/\n    Mat iterCounts;\n\n    Mat cameraMatrix;\n    int transformType;\n\n    double maxTranslation, maxRotation;\n\n    mutable Ptr<RgbdNormals> normalsComputer;\n  };\n\n  /** Odometry that merges RgbdOdometry and ICPOdometry by minimizing the sum of their energy functions.\n   */\n\n  class RgbdICPOdometry: public Odometry\n  {\n  public:\n    RgbdICPOdometry();\n    /** Constructor.\n     * @param cameraMatrix Camera matrix\n     * @param minDepth Pixels with depth less than minDepth will not be used\n     * @param maxDepth Pixels with depth larger than maxDepth will not be used\n     * @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out\n     *                     if their depth difference is larger than maxDepthDiff\n     * @param maxPointsPart The method uses a random subset of pixels of size frameWidth x frameHeight x pointsPart\n     * @param iterCounts Count of iterations on each pyramid level.\n     * @param minGradientMagnitudes For each pyramid level the pixels will be filtered out\n     *                              if they have gradient magnitude less than minGradientMagnitudes[level].\n     * @param transformType Class of transformation\n     */\n    RgbdICPOdometry(const Mat& cameraMatrix, float minDepth = DEFAULT_MIN_DEPTH(), float maxDepth = DEFAULT_MAX_DEPTH(),\n                    float maxDepthDiff = DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = DEFAULT_MAX_POINTS_PART(),\n                    const std::vector<int>& iterCounts = std::vector<int>(),\n                    const std::vector<float>& minGradientMagnitudes = std::vector<float>(),\n                    int transformType = RIGID_BODY_MOTION);\n\n    virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;\n\n    cv::Mat getCameraMatrix() const\n    {\n        return cameraMatrix;\n    }\n    void setCameraMatrix(const cv::Mat &val)\n    {\n        cameraMatrix = val;\n    }\n    double getMinDepth() const\n    {\n        return minDepth;\n    }\n    void setMinDepth(double val)\n    {\n        minDepth = val;\n    }\n    double getMaxDepth() const\n    {\n        return maxDepth;\n    }\n    void setMaxDepth(double val)\n    {\n        maxDepth = val;\n    }\n    double getMaxDepthDiff() const\n    {\n        return maxDepthDiff;\n    }\n    void setMaxDepthDiff(double val)\n    {\n        maxDepthDiff = val;\n    }\n    double getMaxPointsPart() const\n    {\n        return maxPointsPart;\n    }\n    void setMaxPointsPart(double val)\n    {\n        maxPointsPart = val;\n    }\n    cv::Mat getIterationCounts() const\n    {\n        return iterCounts;\n    }\n    void setIterationCounts(const cv::Mat &val)\n    {\n        iterCounts = val;\n    }\n    cv::Mat getMinGradientMagnitudes() const\n    {\n        return minGradientMagnitudes;\n    }\n    void setMinGradientMagnitudes(const cv::Mat &val)\n    {\n        minGradientMagnitudes = val;\n    }\n    int getTransformType() const\n    {\n        return transformType;\n    }\n    void setTransformType(int val)\n    {\n        transformType = val;\n    }\n    double getMaxTranslation() const\n    {\n        return maxTranslation;\n    }\n    void setMaxTranslation(double val)\n    {\n        maxTranslation = val;\n    }\n    double getMaxRotation() const\n    {\n        return maxRotation;\n    }\n    void 
setMaxRotation(double val)\n    {\n        maxRotation = val;\n    }\n    Ptr<RgbdNormals> getNormalsComputer() const\n    {\n        return normalsComputer;\n    }\n\n  protected:\n    virtual void\n    checkParams() const;\n\n    virtual bool\n    computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,\n                const Mat& initRt) const;\n\n    // Some params have their desired type in a comment. This is because AlgorithmInfo::addParams does not support it yet.\n    /*float*/\n    double minDepth, maxDepth, maxDepthDiff;\n    /*float*/\n    double maxPointsPart;\n    /*vector<int>*/\n    Mat iterCounts;\n    /*vector<float>*/\n    Mat minGradientMagnitudes;\n\n    Mat cameraMatrix;\n    int transformType;\n\n    double maxTranslation, maxRotation;\n\n    mutable Ptr<RgbdNormals> normalsComputer;\n  };\n\n  /** Warp the image: compute 3d points from the depth, transform them using a given transformation,\n   * then project the colored point cloud onto an image plane.\n   * This function can be used to visualize results of the Odometry algorithm.\n   * @param image The image (of CV_8UC1 or CV_8UC3 type)\n   * @param depth The depth (of the type used in the depthTo3d function)\n   * @param mask The mask of used pixels (of CV_8UC1), it can be empty\n   * @param Rt The transformation that will be applied to the 3d points computed from the depth\n   * @param cameraMatrix Camera matrix\n   * @param distCoeff Distortion coefficients\n   * @param warpedImage The warped image.\n   * @param warpedDepth The warped depth.\n   * @param warpedMask The warped mask.\n   */\n  CV_EXPORTS\n  void\n  warpFrame(const Mat& image, const Mat& depth, const Mat& mask, const Mat& Rt, const Mat& cameraMatrix,\n            const Mat& distCoeff, Mat& warpedImage, Mat* warpedDepth = 0, Mat* warpedMask = 0);\n\n// TODO Depth interpolation\n// Curvature\n// Make rescaleDepth return doubles if asked for\n\n//! @}\n\n} /* namespace rgbd */\n} /* namespace cv */\n\n#include \"opencv2/rgbd/linemod.hpp\"\n\n#endif /* __cplusplus */\n#endif\n\n/* End of file. */\n\n"
  },
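A minimal usage sketch of the Odometry API declared in rgbd.hpp above (not part of this repository): grayscale frames are CV_8UC1, depth maps CV_32FC1 in meters, and the recovered rigid-body motion Rt is a 4x4 CV_64FC1 matrix with dst_p = Rt * src_p. The file names and pinhole intrinsics below are illustrative assumptions.

#include <opencv2/rgbd.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    // Assumed pinhole intrinsics for the depth sensor (fx, fy, cx, cy).
    cv::Mat K = (cv::Mat_<double>(3, 3) << 525.0, 0.0, 319.5,
                                           0.0, 525.0, 239.5,
                                           0.0, 0.0, 1.0);
    cv::Mat srcImage = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
    cv::Mat dstImage = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);
    // Depth is assumed to load as CV_32FC1, already in meters.
    cv::Mat srcDepth = cv::imread("depth0.exr", cv::IMREAD_UNCHANGED);
    cv::Mat dstDepth = cv::imread("depth1.exr", cv::IMREAD_UNCHANGED);

    cv::rgbd::RgbdOdometry odom(K);  // defaults: 0..4 m depth range, RIGID_BODY_MOTION
    cv::Mat Rt;                      // 4x4 CV_64FC1, dst_p = Rt * src_p
    if (odom.compute(srcImage, srcDepth, cv::Mat(),   // empty masks: use all valid pixels
                     dstImage, dstDepth, cv::Mat(), Rt))
        std::cout << "Rt = " << Rt << std::endl;
    else
        std::cout << "odometry failed (e.g. not enough correspondences)" << std::endl;
    return 0;
}

For repeated calls over a video stream, the second compute() overload with OdometryFrame arguments lets the image pyramids and normals be cached instead of recomputed per pair.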
  {
    "path": "src/3rdparty/opencv/include/opencv2/saliency/saliencyBaseClasses.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_SALIENCY_BASE_CLASSES_HPP__\n#define __OPENCV_SALIENCY_BASE_CLASSES_HPP__\n\n#include \"opencv2/core.hpp\"\n#include <opencv2/core/persistence.hpp>\n#include \"opencv2/imgproc.hpp\"\n#include <iostream>\n#include <sstream>\n#include <complex>\n\nnamespace cv\n{\nnamespace saliency\n{\n\n//! @addtogroup saliency\n//! 
@{\n\n/************************************ Saliency Base Class ************************************/\n\nclass CV_EXPORTS Saliency : public virtual Algorithm\n{\n public:\n  /**\n   * \\brief Destructor\n   */\n  virtual ~Saliency();\n\n  /**\n   * \\brief Create Saliency by saliency type.\n   */\n  static Ptr<Saliency> create( const String& saliencyType );\n\n  /**\n   * \\brief Compute the saliency\n   * \\param image        The image.\n   * \\param saliencyMap      The computed saliency map.\n   * \\return true if the saliency map is computed, false otherwise\n   */\n  bool computeSaliency( InputArray image, OutputArray saliencyMap );\n\n  /**\n   * \\brief Get the name of the specific saliency type\n   * \\return The name of the specific saliency type\n   */\n  String getClassName() const;\n\n protected:\n\n  virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap ) = 0;\n  String className;\n};\n\n/************************************ Static Saliency Base Class ************************************/\nclass CV_EXPORTS StaticSaliency : public virtual Saliency\n{\n public:\n\n    /** @brief This function computes a binary map from a given saliency map. It is obtained in the\n    following way:\n\n    In the first step, to improve the definition of interest areas and facilitate identification of\n    targets, a segmentation by clustering is performed, using the *K-means algorithm*. Then, to gain a\n    binary representation of the clustered saliency map, since the values of the map can vary according to\n    the characteristics of the frame under analysis, it is not convenient to use a fixed threshold. So,\n    *Otsu’s algorithm* is used, which assumes that the image to be thresholded contains two classes\n    of pixels or bi-modal histograms (e.g. foreground and background pixels); later on, the\n    algorithm calculates the optimal threshold separating those two classes, so that their\n    intra-class variance is minimal.\n\n    @param saliencyMap the saliency map obtained through one of the specialized algorithms\n    @param binaryMap the binary map\n     */\n  bool computeBinaryMap( const Mat& saliencyMap, Mat& binaryMap );\n protected:\n  virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap )=0;\n\n};\n\n/************************************ Motion Saliency Base Class ************************************/\nclass CV_EXPORTS MotionSaliency : public virtual Saliency\n{\n\n protected:\n  virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap )=0;\n\n};\n\n/************************************ Objectness Base Class ************************************/\nclass CV_EXPORTS Objectness : public virtual Saliency\n{\n\n protected:\n  virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap )=0;\n\n};\n\n//! @}\n\n} /* namespace saliency */\n} /* namespace cv */\n\n#endif\n
  },
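A minimal sketch of the factory flow declared in saliencyBaseClasses.hpp above. It assumes the type string "SPECTRAL_RESIDUAL" is among those recognized by Saliency::create, and the input file name is illustrative.

#include <opencv2/saliency.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    using namespace cv;
    using namespace cv::saliency;

    Mat image = imread("scene.png");                            // illustrative input
    Ptr<Saliency> sal = Saliency::create("SPECTRAL_RESIDUAL");  // assumed type name
    Mat saliencyMap;
    if (sal && sal->computeSaliency(image, saliencyMap))
    {
        // StaticSaliency adds the K-means + Otsu binarization described above.
        Mat binaryMap;
        sal.dynamicCast<StaticSaliency>()->computeBinaryMap(saliencyMap, binaryMap);
    }
    return 0;
}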
  {
    "path": "src/3rdparty/opencv/include/opencv2/saliency/saliencySpecializedClasses.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_SALIENCY_SPECIALIZED_CLASSES_HPP__\n#define __OPENCV_SALIENCY_SPECIALIZED_CLASSES_HPP__\n\n//#include \"opencv2/saliency/kyheader.hpp\"\n#include <cstdio>\n#include <string>\n#include <iostream>\n#include <stdint.h>\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\nnamespace saliency\n{\n\n//! @addtogroup saliency\n//! @{\n\n/************************************ Specific Static Saliency Specialized Classes ************************************/\n\n/** @brief the Spectral Residual approach from  @cite SR\n\nStarting from the principle of natural image statistics, this method simulate the behavior of\npre-attentive visual search. The algorithm analyze the log spectrum of each image and obtain the\nspectral residual. 
The spectral residual is then transformed to the spatial domain to obtain the saliency\nmap, which suggests the positions of proto-objects.\n */\nclass CV_EXPORTS StaticSaliencySpectralResidual : public StaticSaliency\n{\npublic:\n\n  StaticSaliencySpectralResidual();\n  virtual ~StaticSaliencySpectralResidual();\n\n  void read( const FileNode& fn );\n  void write( FileStorage& fs ) const;\n\n  int getImageWidth() const\n  {\n    return resImWidth;\n  }\n  inline void setImageWidth(int val)\n  {\n    resImWidth = val;\n  }\n  int getImageHeight() const\n  {\n    return resImHeight;\n  }\n  void setImageHeight(int val)\n  {\n    resImHeight = val;\n  }\n\nprotected:\n  bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap );\n  int resImWidth;\n  int resImHeight;\n\n};\n\n/************************************ Specific Motion Saliency Specialized Classes ************************************/\n\n/*!\n * A Fast Self-tuning Background Subtraction Algorithm.\n *\n * This background subtraction algorithm is inspired by the work of B. Wang and P. Dudek [2]\n * [2]  B. Wang and P. Dudek \"A Fast Self-tuning Background Subtraction Algorithm\", in proc. of IEEE Workshop on Change Detection, 2014\n *\n */\n/** @brief the Fast Self-tuning Background Subtraction Algorithm from @cite BinWangApr2014\n */\nclass CV_EXPORTS MotionSaliencyBinWangApr2014 : public MotionSaliency\n{\npublic:\n  MotionSaliencyBinWangApr2014();\n  virtual ~MotionSaliencyBinWangApr2014();\n\n  /** @brief This is a utility function that allows setting the correct size (taken from the input image) in the\n    corresponding variables that will be used to size the data structures of the algorithm.\n    @param W width of input image\n    @param H height of input image\n  */\n  void setImagesize( int W, int H );\n  /** @brief This function allows the correct initialization of all data structures that will be used by the\n    algorithm.\n  */\n  bool init();\n\n  int getImageWidth() const\n  {\n    return imageWidth;\n  }\n  inline void setImageWidth(int val)\n  {\n    imageWidth = val;\n  }\n  int getImageHeight() const\n  {\n    return imageHeight;\n  }\n  void setImageHeight(int val)\n  {\n    imageHeight = val;\n  }\n\nprotected:\n  /** @brief Performs all the operations and calls all internal functions necessary for the accomplishment of the\n    Fast Self-tuning Background Subtraction algorithm.\n    @param image input image. According to the needs of this specialized algorithm, the param image is a\n    single *Mat*.\n    @param saliencyMap Saliency Map. It is a binarized map that, in accordance with the nature of the algorithm, highlights the moving objects or areas of change in the scene.\n       The saliency map is given by a single *Mat* (one for each frame of a hypothetical video\n        stream).\n  */\n  bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap );\n\nprivate:\n\n  // classification (and adaptation) functions\n  bool fullResolutionDetection( const Mat& image, Mat& highResBFMask );\n  bool lowResolutionDetection( const Mat& image, Mat& lowResBFMask );\n\n  // Background model maintenance functions\n  bool templateOrdering();\n  bool templateReplacement( const Mat& finalBFMask, const Mat& image );\n\n  // changing structure\n  std::vector<Ptr<Mat> > backgroundModel;// The vector represents the background template T0---TK of reference paper.\n  // Matrices are two-channel. In the first layer there is the B (background) value\n  // for each pixel. 
In the second layer there is the C (efficacy) value for each pixel\n  Mat potentialBackground;// Two-channel matrix. For each pixel, in the first level there is the Ba value (potential background value)\n                          // and in the second level there is the Ca value, the counter for each potential value.\n  Mat epslonPixelsValue;  // epsilon threshold\n\n  // fixed parameters\n  bool neighborhoodCheck;\n  int N_DS;// Number of templates to be downsampled and used in the lowResolutionDetection function\n  int imageWidth;// Width of input image\n  int imageHeight;// Height of input image\n  int K;// Number of background model templates\n  int N;// NxN is the size of the block for downsampling in lowResolutionDetection\n  float alpha;// Learning rate\n  int L0, L1;// Upper-bound values for C0 and C1 (efficacy of the first two templates (matrices) of backgroundModel)\n  int thetaL;// T0, T1 swap threshold\n  int thetaA;// Potential background value threshold\n  int gamma;// Parameter that controls the time that the newly updated long-term background value will remain in the\n            // long-term template, regardless of any subsequent background changes. A relatively large value (e.g. gamma=3) will\n            // restrain the generation of ghosts.\n\n};\n\n/************************************ Specific Objectness Specialized Classes ************************************/\n\n/**\n * \\brief Objectness algorithms based on [3]\n * [3] Cheng, Ming-Ming, et al. \"BING: Binarized normed gradients for objectness estimation at 300fps.\" IEEE CVPR. 2014.\n */\n\n/** @brief the Binarized normed gradients algorithm from @cite BING\n */\nclass CV_EXPORTS ObjectnessBING : public Objectness\n{\npublic:\n\n  ObjectnessBING();\n  virtual ~ObjectnessBING();\n\n  void read();\n  void write() const;\n\n  /** @brief Return the list of the rectangles' objectness value,\n\n    in the same order as the *vector\\<Vec4i\\> objectnessBoundingBox* returned by the algorithm (in the\n    computeSaliencyImpl function). The bigger these scores are, the more likely the window is to cover an\n    object.\n     */\n  std::vector<float> getobjectnessValues();\n\n  /** @brief This is a utility function that allows setting the correct path from which the algorithm will load\n    the trained model.\n    @param trainingPath trained model path\n     */\n  void setTrainingPath( std::string trainingPath );\n\n  /** @brief This is a utility function that allows setting an arbitrary path in which the algorithm will save the\n    optional results\n\n    (i.e. writing to file the total number and the list of rectangles returned by objectness, one per\n    row).\n    @param resultsDir results' folder path\n     */\n  void setBBResDir( std::string resultsDir );\n\n  double getBase() const\n  {\n    return _base;\n  }\n  inline void setBase(double val)\n  {\n    _base = val;\n  }\n  int getNSS() const\n  {\n    return _NSS;\n  }\n  void setNSS(int val)\n  {\n    _NSS = val;\n  }\n  int getW() const\n  {\n    return _W;\n  }\n  void setW(int val)\n  {\n    _W = val;\n  }\n\nprotected:\n  /** @brief Performs all the operations and calls all internal functions necessary for the\n  accomplishment of the Binarized normed gradients algorithm.\n\n    @param image input image. According to the needs of this specialized algorithm, the param image is a\n    single *Mat*\n    @param objectnessBoundingBox objectness Bounding Box vector. According to the result given by this\n    specialized algorithm, the objectnessBoundingBox is a *vector\\<Vec4i\\>*. 
Each bounding box is\n    represented by a *Vec4i* for (minX, minY, maxX, maxY).\n     */\n  bool computeSaliencyImpl( InputArray image, OutputArray objectnessBoundingBox );\n\nprivate:\n\n  class FilterTIG\n  {\n  public:\n    void update( Mat &w );\n\n    // For a W by H gradient magnitude map, find a W-7 by H-7 CV_32F matching score map\n    Mat matchTemplate( const Mat &mag1u );\n\n    float dot( int64_t tig1, int64_t tig2, int64_t tig4, int64_t tig8 );\n    void reconstruct( Mat &w );// For illustration purpose\n\n  private:\n    static const int NUM_COMP = 2;// Number of components\n    static const int D = 64;// Dimension of TIG\n    int64_t _bTIGs[NUM_COMP];// Binary TIG features\n    float _coeffs1[NUM_COMP];// Coefficients of binary TIG features\n\n    // For efficiently deals with different bits in CV_8U gradient map\n    float _coeffs2[NUM_COMP], _coeffs4[NUM_COMP], _coeffs8[NUM_COMP];\n  };\n\n  template<typename VT, typename ST>\n  struct ValStructVec\n  {\n    ValStructVec();\n    int size() const;\n    void clear();\n    void reserve( int resSz );\n    void pushBack( const VT& val, const ST& structVal );\n    const VT& operator ()( int i ) const;\n    const ST& operator []( int i ) const;\n    VT& operator ()( int i );\n    ST& operator []( int i );\n\n    void sort( bool descendOrder = true );\n    const std::vector<ST> &getSortedStructVal();\n    std::vector<std::pair<VT, int> > getvalIdxes();\n    void append( const ValStructVec<VT, ST> &newVals, int startV = 0 );\n\n    std::vector<ST> structVals;  // struct values\n    int sz;// size of the value struct vector\n    std::vector<std::pair<VT, int> > valIdxes;// Indexes after sort\n    bool smaller()\n    {\n      return true;\n    }\n    std::vector<ST> sortedStructVals;\n  };\n\n  enum\n  {\n    MAXBGR,\n    HSV,\n    G\n  };\n\n  double _base, _logBase;  // base for window size quantization\n  int _W;// As described in the paper: #Size, Size(_W, _H) of feature window.\n  int _NSS;// Size for non-maximal suppress\n  int _maxT, _minT, _numT;// The minimal and maximal dimensions of the template\n\n  int _Clr;//\n  static const char* _clrName[3];\n\n  // Names and paths to read model and to store results\n  std::string _modelName, _bbResDir, _trainingPath, _resultsDir;\n\n  std::vector<int> _svmSzIdxs;// Indexes of active size. 
It's equal to _svmFilters.size() and _svmReW1f.rows\n  Mat _svmFilter;// Filters learned at stage I, each is a _H by _W CV_32F matrix\n  FilterTIG _tigF;// TIG filter\n  Mat _svmReW1f;// Re-weight parameters learned at stage II.\n\n  // List of the rectangles' objectness value, in the same order as\n  // the  vector<Vec4i> objectnessBoundingBox returned by the algorithm (in computeSaliencyImpl function)\n  std::vector<float> objectnessValues;\n\nprivate:\n  // functions\n\n  inline static float LoG( float x, float y, float delta )\n  {\n    float d = - ( x * x + y * y ) / ( 2 * delta * delta );\n    return -1.0f / ( (float) ( CV_PI ) * pow( delta, 4 ) ) * ( 1 + d ) * exp( d );\n  }  // Laplacian of Gaussian\n\n  // Read matrix from binary file\n  static bool matRead( const std::string& filename, Mat& M );\n\n  void setColorSpace( int clr = MAXBGR );\n\n  // Load trained model.\n  int loadTrainedModel( std::string modelName = \"\" );// Return -1, 0, or 1 if partial, none, or all loaded\n\n  // Get potential bounding boxes, each of which is represented by a Vec4i for (minX, minY, maxX, maxY).\n  // The trained model should be prepared before calling this function: loadTrainedModel() or trainStageI() + trainStageII().\n  // Use numDet to control the final number of proposed bounding boxes, and number of per size (scale and aspect ratio)\n  void getObjBndBoxes( Mat &img3u, ValStructVec<float, Vec4i> &valBoxes, int numDetPerSize = 120 );\n  void getObjBndBoxesForSingleImage( Mat img, ValStructVec<float, Vec4i> &boxes, int numDetPerSize );\n\n  bool filtersLoaded()\n  {\n    int n = (int) _svmSzIdxs.size();\n    return n > 0 && _svmReW1f.size() == Size( 2, n ) && _svmFilter.size() == Size( _W, _W );\n  }\n  void predictBBoxSI( Mat &mag3u, ValStructVec<float, Vec4i> &valBoxes, std::vector<int> &sz, int NUM_WIN_PSZ = 100, bool fast = true );\n  void predictBBoxSII( ValStructVec<float, Vec4i> &valBoxes, const std::vector<int> &sz );\n\n  // Calculate the image gradient: center option as in VLFeat\n  void gradientMag( Mat &imgBGR3u, Mat &mag1u );\n\n  static void gradientRGB( Mat &bgr3u, Mat &mag1u );\n  static void gradientGray( Mat &bgr3u, Mat &mag1u );\n  static void gradientHSV( Mat &bgr3u, Mat &mag1u );\n  static void gradientXY( Mat &x1i, Mat &y1i, Mat &mag1u );\n\n  static inline int bgrMaxDist( const Vec3b &u, const Vec3b &v )\n  {\n    int b = abs( u[0] - v[0] ), g = abs( u[1] - v[1] ), r = abs( u[2] - v[2] );\n    b = max( b, g );\n    return max( b, r );\n  }\n  static inline int vecDist3b( const Vec3b &u, const Vec3b &v )\n  {\n    return abs( u[0] - v[0] ) + abs( u[1] - v[1] ) + abs( u[2] - v[2] );\n  }\n\n  //Non-maximal suppress\n  static void nonMaxSup( Mat &matchCost1f, ValStructVec<float, Point> &matchCost, int NSS = 1, int maxPoint = 50, bool fast = true );\n\n};\n\n//! @}\n\n}\n/* namespace saliency */\n} /* namespace cv */\n\n#endif\n"
  },
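A sketch of the BING objectness flow from the declarations above: point the detector at its trained model, run computeSaliency to obtain candidate windows as (minX, minY, maxX, maxY) Vec4i entries, then read the aligned per-window scores. The model directory and image path are assumptions.

#include <opencv2/saliency.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::saliency::ObjectnessBING bing;
    bing.setTrainingPath("ObjectnessTrainedModel");  // assumed model directory

    cv::Mat image = cv::imread("scene.png");
    std::vector<cv::Vec4i> boxes;  // one (minX, minY, maxX, maxY) per candidate window
    if (bing.computeSaliency(image, boxes))
    {
        // scores[i] corresponds to boxes[i]; larger means more object-like.
        std::vector<float> scores = bing.getobjectnessValues();
    }
    return 0;
}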
  {
    "path": "src/3rdparty/opencv/include/opencv2/saliency.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_SALIENCY_HPP__\n#define __OPENCV_SALIENCY_HPP__\n\n#include \"opencv2/saliency/saliencyBaseClasses.hpp\"\n#include \"opencv2/saliency/saliencySpecializedClasses.hpp\"\n\n/** @defgroup saliency Saliency API\n\nMany computer vision applications may benefit from understanding where humans focus given a scene.\nOther than cognitively understanding the way human perceive images and scenes, finding salient\nregions and objects in the images helps various tasks such as speeding up object detection, object\nrecognition, object tracking and content-aware image editing.\n\nAbout the saliency, there is a rich literature but the development is very fragmented. 
The principal\npurpose of this API is to give a single interface and framework to use and plug in several\nsaliency algorithms, possibly of very different nature and methodology but sharing the same\npurpose, organized into three main categories:\n\n**Static Saliency**: algorithms in this category exploit different image features that\nallow detecting salient objects in non-dynamic scenarios.\n\n**Motion Saliency**: algorithms in this category focus on detecting\nsalient objects over time (hence also across frames); the temporal component allows\ndetecting \"moving\" objects as salient, and therefore also, in a more general\nsense, detecting the changes in the scene.\n\n**Objectness**: Objectness is usually represented as a value which reflects how likely an image\nwindow covers an object of any category. Algorithms in this category avoid making\ndecisions early on by proposing a small number of category-independent candidate windows that are expected\nto cover all objects in an image. Being able to perceive objects before identifying them is closely\nrelated to bottom-up visual attention (saliency).\n\n![Saliency diagram](pics/saliency.png)\n\nTo see how the API works, try the demo:\n<https://github.com/fpuja/opencv_contrib/blob/saliencyModuleDevelop/modules/saliency/samples/computeSaliency.cpp>\n\n@note This API has been designed with PlantUML. If you modify this API please update the UML.\n\n*/\n\n#endif //__OPENCV_SALIENCY_HPP__\n
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/shape/emdL1.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_EMD_L1_HPP__\n#define __OPENCV_EMD_L1_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n/****************************************************************************************\\\n*                                   EMDL1 Function                                      *\n\\****************************************************************************************/\n\n//! @addtogroup shape\n//! @{\n\n/** @brief Computes the \"minimal work\" distance between two weighted point configurations base on the papers\n\"EMD-L1: An efficient and Robust Algorithm for comparing histogram-based descriptors\", by Haibin\nLing and Kazunori Okuda; and \"The Earth Mover's Distance is the Mallows Distance: Some Insights from\nStatistics\", by Elizaveta Levina and Peter Bickel.\n\n@param signature1 First signature, a single column floating-point matrix. Each row is the value of\nthe histogram in each bin.\n@param signature2 Second signature of the same format and size as signature1.\n */\nCV_EXPORTS float EMDL1(InputArray signature1, InputArray signature2);\n\n//! @}\n\n}//namespace cv\n\n#endif\n"
  },
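A tiny sketch of EMDL1() as declared above: each signature is a single-column floating-point matrix whose rows hold the per-bin histogram values.

#include <opencv2/shape/emdL1.hpp>
#include <iostream>

int main()
{
    // Two 4-bin histograms as single-column CV_32F matrices.
    cv::Mat h1 = (cv::Mat_<float>(4, 1) << 0.1f, 0.4f, 0.4f, 0.1f);
    cv::Mat h2 = (cv::Mat_<float>(4, 1) << 0.4f, 0.1f, 0.1f, 0.4f);
    // "Minimal work" needed to morph h1 into h2.
    std::cout << cv::EMDL1(h1, h2) << std::endl;
    return 0;
}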
  {
    "path": "src/3rdparty/opencv/include/opencv2/shape/hist_cost.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_HIST_COST_HPP__\n#define __OPENCV_HIST_COST_HPP__\n\n#include \"opencv2/imgproc.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup shape\n//! @{\n\n/** @brief Abstract base class for histogram cost algorithms.\n */\nclass CV_EXPORTS_W HistogramCostExtractor : public Algorithm\n{\npublic:\n    CV_WRAP virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix) = 0;\n\n    CV_WRAP virtual void setNDummies(int nDummies) = 0;\n    CV_WRAP virtual int getNDummies() const = 0;\n\n    CV_WRAP virtual void setDefaultCost(float defaultCost) = 0;\n    CV_WRAP virtual float getDefaultCost() const = 0;\n};\n\n/** @brief A norm based cost extraction. :\n */\nclass CV_EXPORTS_W NormHistogramCostExtractor : public HistogramCostExtractor\n{\npublic:\n    CV_WRAP virtual void setNormFlag(int flag) = 0;\n    CV_WRAP virtual int getNormFlag() const = 0;\n};\n\nCV_EXPORTS_W Ptr<HistogramCostExtractor>\n    createNormHistogramCostExtractor(int flag=DIST_L2, int nDummies=25, float defaultCost=0.2f);\n\n/** @brief An EMD based cost extraction. 
:\n */\nclass CV_EXPORTS_W EMDHistogramCostExtractor : public HistogramCostExtractor\n{\npublic:\n    CV_WRAP virtual void setNormFlag(int flag) = 0;\n    CV_WRAP virtual int getNormFlag() const = 0;\n};\n\nCV_EXPORTS_W Ptr<HistogramCostExtractor>\n    createEMDHistogramCostExtractor(int flag=DIST_L2, int nDummies=25, float defaultCost=0.2f);\n\n/** @brief A Chi based cost extraction. :\n */\nclass CV_EXPORTS_W ChiHistogramCostExtractor : public HistogramCostExtractor\n{};\n\nCV_EXPORTS_W Ptr<HistogramCostExtractor> createChiHistogramCostExtractor(int nDummies=25, float defaultCost=0.2f);\n\n/** @brief An EMD-L1 based cost extraction. :\n */\nclass CV_EXPORTS_W EMDL1HistogramCostExtractor : public HistogramCostExtractor\n{};\n\nCV_EXPORTS_W Ptr<HistogramCostExtractor>\n    createEMDL1HistogramCostExtractor(int nDummies=25, float defaultCost=0.2f);\n\n//! @}\n\n} // cv\n#endif\n
  },
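A sketch of the cost-extractor round trip declared above: two descriptor sets go in, a pairwise cost matrix comes out, with nDummies/defaultCost covering unmatched points. The descriptor shapes and random contents below are purely illustrative.

#include <opencv2/shape/hist_cost.hpp>

int main()
{
    cv::Ptr<cv::HistogramCostExtractor> chi =
        cv::createChiHistogramCostExtractor(25, 0.2f);  // nDummies, defaultCost

    // Illustrative descriptors: one row per point, one column per histogram bin.
    cv::Mat desc1(32, 60, CV_32F), desc2(32, 60, CV_32F);
    cv::randu(desc1, cv::Scalar(0), cv::Scalar(1));
    cv::randu(desc2, cv::Scalar(0), cv::Scalar(1));

    cv::Mat costMatrix;  // pairwise matching costs, padded by the dummies
    chi->buildCostMatrix(desc1, desc2, costMatrix);
    return 0;
}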
  {
    "path": "src/3rdparty/opencv/include/opencv2/shape/shape.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/shape.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/shape/shape_distance.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_SHAPE_SHAPE_DISTANCE_HPP__\n#define __OPENCV_SHAPE_SHAPE_DISTANCE_HPP__\n#include \"opencv2/core.hpp\"\n#include \"opencv2/shape/hist_cost.hpp\"\n#include \"opencv2/shape/shape_transformer.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup shape\n//! @{\n\n/** @brief Abstract base class for shape distance algorithms.\n */\nclass CV_EXPORTS_W ShapeDistanceExtractor : public Algorithm\n{\npublic:\n    /** @brief Compute the shape distance between two shapes defined by its contours.\n\n    @param contour1 Contour defining first shape.\n    @param contour2 Contour defining second shape.\n     */\n    CV_WRAP virtual float computeDistance(InputArray contour1, InputArray contour2) = 0;\n};\n\n/***********************************************************************************/\n/***********************************************************************************/\n/***********************************************************************************/\n/** @brief Implementation of the Shape Context descriptor and matching algorithm\n\nproposed by Belongie et al. in \"Shape Matching and Object Recognition Using Shape Contexts\" (PAMI\n2002). 
This implementation is packaged in a generic scheme, in order to allow the\nimplementation of common variations of the original pipeline.\n*/\nclass CV_EXPORTS_W ShapeContextDistanceExtractor : public ShapeDistanceExtractor\n{\npublic:\n    /** @brief Establish the number of angular bins for the Shape Context Descriptor used in the shape matching\n    pipeline.\n\n    @param nAngularBins The number of angular bins in the shape context descriptor.\n     */\n    CV_WRAP virtual void setAngularBins(int nAngularBins) = 0;\n    CV_WRAP virtual int getAngularBins() const = 0;\n\n    /** @brief Establish the number of radial bins for the Shape Context Descriptor used in the shape matching\n    pipeline.\n\n    @param nRadialBins The number of radial bins in the shape context descriptor.\n     */\n    CV_WRAP virtual void setRadialBins(int nRadialBins) = 0;\n    CV_WRAP virtual int getRadialBins() const = 0;\n\n    /** @brief Set the inner radius of the shape context descriptor.\n\n    @param innerRadius The value of the inner radius.\n     */\n    CV_WRAP virtual void setInnerRadius(float innerRadius) = 0;\n    CV_WRAP virtual float getInnerRadius() const = 0;\n\n    /** @brief Set the outer radius of the shape context descriptor.\n\n    @param outerRadius The value of the outer radius.\n     */\n    CV_WRAP virtual void setOuterRadius(float outerRadius) = 0;\n    CV_WRAP virtual float getOuterRadius() const = 0;\n\n    CV_WRAP virtual void setRotationInvariant(bool rotationInvariant) = 0;\n    CV_WRAP virtual bool getRotationInvariant() const = 0;\n\n    /** @brief Set the weight of the shape context distance in the final value of the shape distance. The shape\n    context distance between two shapes is defined as the symmetric sum of shape context matching costs\n    over best matching points. The final value of the shape distance is a user-defined linear\n    combination of the shape context distance, an image appearance distance, and a bending energy.\n\n    @param shapeContextWeight The weight of the shape context distance in the final distance value.\n     */\n    CV_WRAP virtual void setShapeContextWeight(float shapeContextWeight) = 0;\n    CV_WRAP virtual float getShapeContextWeight() const = 0;\n\n    /** @brief Set the weight of the Image Appearance cost in the final value of the shape distance. The image\n    appearance cost is defined as the sum of squared brightness differences in Gaussian windows around\n    corresponding image points. The final value of the shape distance is a user-defined linear\n    combination of the shape context distance, an image appearance distance, and a bending energy. If\n    this value is set to a number different from 0, it is mandatory to set the images that correspond to\n    each shape.\n\n    @param imageAppearanceWeight The weight of the appearance cost in the final distance value.\n     */\n    CV_WRAP virtual void setImageAppearanceWeight(float imageAppearanceWeight) = 0;\n    CV_WRAP virtual float getImageAppearanceWeight() const = 0;\n\n    /** @brief Set the weight of the Bending Energy in the final value of the shape distance. The bending energy\n    definition depends on what transformation is being used to align the shapes. 
 The final value of the\n    shape distance is a user-defined linear combination of the shape context distance, an image\n    appearance distance, and a bending energy.\n\n    @param bendingEnergyWeight The weight of the Bending Energy in the final distance value.\n     */\n    CV_WRAP virtual void setBendingEnergyWeight(float bendingEnergyWeight) = 0;\n    CV_WRAP virtual float getBendingEnergyWeight() const = 0;\n\n    /** @brief Set the images that correspond to each shape. These images are used in the calculation of the Image\n    Appearance cost.\n\n    @param image1 Image corresponding to the shape defined by contours1.\n    @param image2 Image corresponding to the shape defined by contours2.\n     */\n    CV_WRAP virtual void setImages(InputArray image1, InputArray image2) = 0;\n    CV_WRAP virtual void getImages(OutputArray image1, OutputArray image2) const = 0;\n\n    CV_WRAP virtual void setIterations(int iterations) = 0;\n    CV_WRAP virtual int getIterations() const = 0;\n\n    /** @brief Set the algorithm used for building the shape context descriptor cost matrix.\n\n    @param comparer Smart pointer to a HistogramCostExtractor, an algorithm that defines the cost\n    matrix between descriptors.\n     */\n    CV_WRAP virtual void setCostExtractor(Ptr<HistogramCostExtractor> comparer) = 0;\n    CV_WRAP virtual Ptr<HistogramCostExtractor> getCostExtractor() const = 0;\n\n    /** @brief Set the value of the standard deviation for the Gaussian window for the image appearance cost.\n\n    @param sigma Standard deviation.\n     */\n    CV_WRAP virtual void setStdDev(float sigma) = 0;\n    CV_WRAP virtual float getStdDev() const = 0;\n\n    /** @brief Set the algorithm used for aligning the shapes.\n\n    @param transformer Smart pointer to a ShapeTransformer, an algorithm that defines the aligning\n    transformation.\n     */\n    CV_WRAP virtual void setTransformAlgorithm(Ptr<ShapeTransformer> transformer) = 0;\n    CV_WRAP virtual Ptr<ShapeTransformer> getTransformAlgorithm() const = 0;\n};\n\n/* Complete constructor */\nCV_EXPORTS_W Ptr<ShapeContextDistanceExtractor>\n    createShapeContextDistanceExtractor(int nAngularBins=12, int nRadialBins=4,\n                                        float innerRadius=0.2f, float outerRadius=2, int iterations=3,\n                                        const Ptr<HistogramCostExtractor> &comparer = createChiHistogramCostExtractor(),\n                                        const Ptr<ShapeTransformer> &transformer = createThinPlateSplineShapeTransformer());\n\n/***********************************************************************************/\n/***********************************************************************************/\n/***********************************************************************************/\n/** @brief A simple Hausdorff distance measure between shapes defined by contours\n\naccording to the paper \"Comparing Images using the Hausdorff distance\" by D.P. Huttenlocher, G.A.\nKlanderman, and W.J. Rucklidge (PAMI 1993).\n */\nclass CV_EXPORTS_W HausdorffDistanceExtractor : public ShapeDistanceExtractor\n{\npublic:\n    /** @brief Set the norm used to compute the Hausdorff value between two shapes.
 It can be the L1 or the L2 norm.\n\n    @param distanceFlag Flag indicating which norm is used to compute the Hausdorff distance\n    (NORM_L1, NORM_L2).\n     */\n    CV_WRAP virtual void setDistanceFlag(int distanceFlag) = 0;\n    CV_WRAP virtual int getDistanceFlag() const = 0;\n\n    /** @brief This method sets the rank proportion (or fractional value) that establishes the Kth ranked value of\n    the partial Hausdorff distance. It has been shown experimentally that 0.6 is a good value for\n    comparing shapes.\n\n    @param rankProportion Fractional value (between 0 and 1).\n     */\n    CV_WRAP virtual void setRankProportion(float rankProportion) = 0;\n    CV_WRAP virtual float getRankProportion() const = 0;\n};\n\n/* Constructor */\nCV_EXPORTS_W Ptr<HausdorffDistanceExtractor> createHausdorffDistanceExtractor(int distanceFlag=cv::NORM_L2, float rankProp=0.6f);\n\n
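/* A hedged usage sketch (illustrative only; contour1 and contour2 are assumed to be point\n   sets such as std::vector<cv::Point>):\n\n       Ptr<HausdorffDistanceExtractor> hd = createHausdorffDistanceExtractor();\n       hd->setDistanceFlag(cv::NORM_L1); // the default is NORM_L2\n       float dist = hd->computeDistance(contour1, contour2);\n*/\n\n//! @}\n\n} // cv\n#endif\n"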
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/shape/shape_transformer.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_SHAPE_SHAPE_TRANSFORM_HPP__\n#define __OPENCV_SHAPE_SHAPE_TRANSFORM_HPP__\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup shape\n//! 
@{\n\n/** @brief Abstract base class for shape transformation algorithms.\n */\nclass CV_EXPORTS_W ShapeTransformer : public Algorithm\n{\npublic:\n    /** @brief Estimate the transformation parameters of the current transformer algorithm, based on point matches.\n\n    @param transformingShape Contour defining first shape.\n    @param targetShape Contour defining second shape (Target).\n    @param matches Standard vector of matches between points.\n     */\n    CV_WRAP virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape,\n                                                 std::vector<DMatch>& matches) = 0;\n\n    /** @brief Apply a transformation, given pre-estimated transformation parameters.\n\n    @param input Contour (set of points) to which the transformation is applied.\n    @param output Output contour.\n     */\n    CV_WRAP virtual float applyTransformation(InputArray input, OutputArray output=noArray()) = 0;\n\n    /** @brief Apply a transformation, given pre-estimated transformation parameters, to an image.\n\n    @param transformingImage Input image.\n    @param output Output image.\n    @param flags Image interpolation method.\n    @param borderMode Border style.\n    @param borderValue Border value.\n     */\n    CV_WRAP virtual void warpImage(InputArray transformingImage, OutputArray output,\n                                   int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT,\n                                   const Scalar& borderValue=Scalar()) const = 0;\n};\n\n/***********************************************************************************/\n/***********************************************************************************/\n\n/** @brief Definition of the transformation\n\nused in the paper \"Principal Warps: Thin-Plate Splines and Decomposition of Deformations\" by\nF.L. Bookstein (PAMI 1989).\n */\nclass CV_EXPORTS_W ThinPlateSplineShapeTransformer : public ShapeTransformer\n{\npublic:\n    /** @brief Set the regularization parameter for relaxing the exact interpolation requirements of the TPS\n    algorithm.\n\n    @param beta Value of the regularization parameter.\n     */\n    CV_WRAP virtual void setRegularizationParameter(double beta) = 0;\n    CV_WRAP virtual double getRegularizationParameter() const = 0;\n};\n\n/** Complete constructor */\nCV_EXPORTS_W Ptr<ThinPlateSplineShapeTransformer>\n    createThinPlateSplineShapeTransformer(double regularizationParameter=0);\n\n/***********************************************************************************/\n/***********************************************************************************/\n\n/** @brief Wrapper class for the OpenCV Affine Transformation algorithm.\n */\nclass CV_EXPORTS_W AffineTransformer : public ShapeTransformer\n{\npublic:\n    CV_WRAP virtual void setFullAffine(bool fullAffine) = 0;\n    CV_WRAP virtual bool getFullAffine() const = 0;\n};\n\n/** Complete constructor */\nCV_EXPORTS_W Ptr<AffineTransformer> createAffineTransformer(bool fullAffine);\n\n
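/* A hedged usage sketch of the transformer interface (illustrative only; pts1, pts2 and\n   matches are assumed to come from a prior shape-matching step):\n\n       Ptr<ThinPlateSplineShapeTransformer> tps =\n           createThinPlateSplineShapeTransformer(0.5); // regularization parameter beta\n       tps->estimateTransformation(pts1, pts2, matches);\n       float bendingEnergy = tps->applyTransformation(pts1);\n*/\n\n//! @}\n\n} // cv\n#endif\n"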
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/shape.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_SHAPE_HPP__\n#define __OPENCV_SHAPE_HPP__\n\n#include \"opencv2/shape/emdL1.hpp\"\n#include \"opencv2/shape/shape_transformer.hpp\"\n#include \"opencv2/shape/hist_cost.hpp\"\n#include \"opencv2/shape/shape_distance.hpp\"\n\n/**\n  @defgroup shape Shape Distance and Matching\n */\n\n#endif\n\n/* End of file. */\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/stereo/descriptor.hpp",
    "content": "//By downloading, copying, installing or using the software you agree to this license.\n//If you do not agree to this license, do not download, install,\n//copy or use the software.\n//\n//\n//                          License Agreement\n//               For Open Source Computer Vision Library\n//                       (3-clause BSD License)\n//\n//Copyright (C) 2000-2015, Intel Corporation, all rights reserved.\n//Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n//Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.\n//Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.\n//Copyright (C) 2015, OpenCV Foundation, all rights reserved.\n//Copyright (C) 2015, Itseez Inc., all rights reserved.\n//Third party copyrights are property of their respective owners.\n//\n//Redistribution and use in source and binary forms, with or without modification,\n//are permitted provided that the following conditions are met:\n//\n//  * Redistributions of source code must retain the above copyright notice,\n//    this list of conditions and the following disclaimer.\n//\n//  * Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n//  * Neither the names of the copyright holders nor the names of the contributors\n//    may be used to endorse or promote products derived from this software\n//    without specific prior written permission.\n//\n//This software is provided by the copyright holders and contributors \"as is\" and\n//any express or implied warranties, including, but not limited to, the implied\n//warranties of merchantability and fitness for a particular purpose are disclaimed.\n//In no event shall copyright holders or contributors be liable for any direct,\n//indirect, incidental, special, exemplary, or consequential damages\n//(including, but not limited to, procurement of substitute goods or services;\n//loss of use, data, or profits; or business interruption) however caused\n//and on any theory of liability, whether in contract, strict liability,\n//or tort (including negligence or otherwise) arising in any way out of\n//the use of this software, even if advised of the possibility of such damage.\n\n/*****************************************************************************************************************\\\n*   The interface contains the main descriptors that will be implemented in the descriptor class                  *\n\\*****************************************************************************************************************/\n\n#include <stdint.h>\n#ifndef _OPENCV_DESCRIPTOR_HPP_\n#define _OPENCV_DESCRIPTOR_HPP_\n#ifdef __cplusplus\n\nnamespace cv\n{\n    namespace stereo\n    {\n        //types of supported kernels\n        enum {\n            CV_DENSE_CENSUS, CV_SPARSE_CENSUS,\n            CV_CS_CENSUS, CV_MODIFIED_CS_CENSUS, CV_MODIFIED_CENSUS_TRANSFORM,\n            CV_MEAN_VARIATION, CV_STAR_KERNEL\n        };\n        //!Mean Variation is a robust kernel that compares a pixel\n        //!not just with the center but also with the mean of the window\n        template<int num_images>\n        struct MVKernel\n        {\n            uint8_t *image[num_images];\n            int *integralImage[num_images];\n            int stop;\n            MVKernel(){}\n            MVKernel(uint8_t **images, int **integral)\n            {\n                
                for(int i = 0; i < num_images; i++)\n                {\n                    image[i] = images[i];\n                    integralImage[i] = integral[i];\n                }\n                stop = num_images;\n            }\n            void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const\n            {\n                (void)w2;\n                for (int i = 0; i < stop; i++)\n                {\n                    if (image[i][rrWidth + jj] > image[i][rWidth + j])\n                    {\n                        c[i] = c[i] + 1;\n                    }\n                    c[i] = c[i] << 1;\n                    if (integralImage[i][rrWidth + jj] > image[i][rWidth + j])\n                    {\n                        c[i] = c[i] + 1;\n                    }\n                    c[i] = c[i] << 1;\n                }\n            }\n        };\n        //!Compares pixels from a patch giving high weights to pixels in which\n        //!the intensity is higher. The other pixels receive a lower weight.\n        template <int num_images>\n        struct MCTKernel\n        {\n            uint8_t *image[num_images];\n            int t,imageStop;\n            MCTKernel(){}\n            MCTKernel(uint8_t ** images, int threshold)\n            {\n                for(int i = 0; i < num_images; i++)\n                {\n                    image[i] = images[i];\n                }\n                imageStop = num_images;\n                t = threshold;\n            }\n            void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const\n            {\n                (void)w2;\n                for(int i = 0; i < imageStop; i++)\n                {\n                    if (image[i][rrWidth + jj] > image[i][rWidth + j] - t)\n                    {\n                        c[i] = c[i] << 1;\n                        c[i] = c[i] + 1;\n                        c[i] = c[i] << 1;\n                        c[i] = c[i] + 1;\n                    }\n                    else if (image[i][rWidth + j] - t < image[i][rrWidth + jj] && image[i][rWidth + j] + t >= image[i][rrWidth + jj])\n                    {\n                        c[i] = c[i] << 2;\n                        c[i] = c[i] + 1;\n                    }\n                    else\n                    {\n                        c[i] <<= 2;\n                    }\n                }\n            }\n        };\n        //!A modified CS census that compares a pixel with the immediate neighbour starting\n        //!from the center\n        template<int num_images>\n        struct ModifiedCsCensus\n        {\n            uint8_t *image[num_images];\n            int n2;\n            int imageStop;\n            ModifiedCsCensus(){}\n            ModifiedCsCensus(uint8_t **images, int ker)\n            {\n                for(int i = 0; i < num_images; i++)\n                    image[i] = images[i];\n                imageStop = num_images;\n                n2 = ker;\n            }\n            void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const\n            {\n                (void)j;\n                (void)rWidth;\n                for(int i = 0; i < imageStop; i++)\n                {\n                    if (image[i][(rrWidth + jj)] > image[i][(w2 + (jj + n2))])\n                    {\n                        c[i] = c[i] + 1;\n                    }\n                    c[i] = c[i] * 2;\n                }\n            }\n        };\n        //!A kernel in which a pixel is compared with
 the center of the window\n        template<int num_images>\n        struct CensusKernel\n        {\n            uint8_t *image[num_images];\n            int imageStop;\n            CensusKernel(){}\n            CensusKernel(uint8_t **images)\n            {\n                for(int i = 0; i < num_images; i++)\n                    image[i] = images[i];\n                imageStop = num_images;\n            }\n            void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const\n            {\n                (void)w2;\n                for(int i = 0; i < imageStop; i++)\n                {\n                    //compare a pixel with the center of the kernel\n                    if (image[i][rrWidth + jj] > image[i][rWidth + j])\n                    {\n                        c[i] += 1;\n                    }\n                    c[i] <<= 1;\n                }\n            }\n        };\n        //template class which efficiently combines the descriptors\n        template <int step_start, int step_end, int step_inc,int nr_img, typename Kernel>\n        class CombinedDescriptor:public ParallelLoopBody\n        {\n        private:\n            int width, height,n2;\n            int stride_;\n            int *dst[nr_img];\n            Kernel kernel_;\n            int n2_stop;\n        public:\n            CombinedDescriptor(int w, int h,int stride, int k2, int **distance, Kernel kernel,int k2Stop)\n            {\n                width = w;\n                height = h;\n                n2 = k2;\n                stride_ = stride;\n                for(int i = 0; i < nr_img; i++)\n                    dst[i] = distance[i];\n                kernel_ = kernel;\n                n2_stop = k2Stop;\n            }\n            void operator()(const cv::Range &r) const {\n                for (int i = r.start; i <= r.end ; i++)\n                {\n                    int rWidth = i * stride_;\n                    for (int j = n2 + 2; j <= width - n2 - 2; j++)\n                    {\n                        int c[nr_img];\n                        memset(c, 0, sizeof(c));\n                        for(int step = step_start; step <= step_end; step += step_inc)\n                        {\n                            for (int ii = - n2; ii <= + n2_stop; ii += step)\n                            {\n                                int rrWidth = (ii + i) * stride_;\n                                int rrWidthC = (ii + i + n2) * stride_;\n                                for (int jj = j - n2; jj <= j + n2; jj += step)\n                                {\n                                    if (ii != i || jj != j)\n                                    {\n                                        kernel_(rrWidth,rrWidthC, rWidth, jj, j,c);\n                                    }\n                                }\n                            }\n                        }\n                        for(int l = 0; l < nr_img; l++)\n                            dst[l][rWidth + j] = c[l];\n                    }\n                }\n            }\n        };\n        //!calculate the mean of every windowSize x windowSize block from the integral image\n        //!this is a preprocessing step for the MV kernel\n        class MeanKernelIntegralImage : public ParallelLoopBody\n        {\n        private:\n            int *img;\n            int windowSize,width;\n            float scalling;\n            int *c;\n        public:\n            MeanKernelIntegralImage(const cv::Mat &image, int window,float scale, int *cost):\n
                img((int *)image.data), windowSize(window), width(image.cols), scalling(scale), c(cost){};\n            void operator()(const cv::Range &r) const{\n                for (int i = r.start; i <= r.end; i++)\n                {\n                    int iw = i * width;\n                    for (int j = windowSize + 1; j <= width - windowSize - 1; j++)\n                    {\n                        c[iw + j] = (int)((img[(i + windowSize - 1) * width + j + windowSize - 1] + img[(i - windowSize - 1) * width + j - windowSize - 1]\n                        - img[(i + windowSize) * width + j - windowSize] - img[(i - windowSize) * width + j + windowSize]) * scalling);\n                    }\n                }\n            }\n        };\n        //!implementation for the star kernel descriptor\n        template<int num_images>\n        class StarKernelCensus:public ParallelLoopBody\n        {\n        private:\n            uint8_t *image[num_images];\n            int *dst[num_images];\n            int n2, width, height, im_num,stride_;\n        public:\n            StarKernelCensus(const cv::Mat *img, int k2, int **distance)\n            {\n                for(int i = 0; i < num_images; i++)\n                {\n                    image[i] = img[i].data;\n                    dst[i] = distance[i];\n                }\n                n2 = k2;\n                width = img[0].cols;\n                height = img[0].rows;\n                im_num = num_images;\n                stride_ = (int)img[0].step;\n            }\n            void operator()(const cv::Range &r) const {\n                for (int i = r.start; i <= r.end ; i++)\n                {\n                    int rWidth = i * stride_;\n                    for (int j = n2; j <= width - n2; j++)\n                    {\n                        for(int d = 0 ; d < im_num; d++)\n                        {\n                            int c = 0;\n                            for(int step = 4; step > 0; step--)\n                            {\n                                for (int ii = i - step; ii <= i + step; ii += step)\n                                {\n                                    int rrWidth = ii * stride_;\n                                    for (int jj = j - step; jj <= j + step; jj += step)\n                                    {\n                                        if (image[d][rrWidth + jj] > image[d][rWidth + j])\n                                        {\n                                            c = c + 1;\n                                        }\n                                        c = c * 2;\n                                    }\n                                }\n                            }\n                            for (int ii = -1; ii <= +1; ii++)\n                            {\n                                int rrWidth = (ii + i) * stride_;\n                                if (ii == -1)\n                                {\n                                    if (ii + i != i)\n                                    {\n                                        if (image[d][rrWidth + j] > image[d][rWidth + j])\n                                        {\n                                            c = c + 1;\n                                        }\n                                        c = c * 2;\n                                    }\n                                }\n                                else if (ii == 0)\n                                {\n                                    for (int j2 = -1;
 j2 <= 1; j2 += 2)\n                                    {\n                                        if (j2 != 0)\n                                        {\n                                            if (image[d][rrWidth + j + j2] > image[d][rWidth + j])\n                                            {\n                                                c = c + 1;\n                                            }\n                                            c = c * 2;\n                                        }\n                                    }\n                                }\n                                else\n                                {\n                                    if (ii + i != i)\n                                    {\n                                        if (image[d][rrWidth + j] > image[d][rWidth + j])\n                                        {\n                                            c = c + 1;\n                                        }\n                                        c = c * 2;\n                                    }\n                                }\n                            }\n                            dst[d][rWidth + j] = c;\n                        }\n                    }\n                }\n            }\n        };\n        //!parallel implementation of the center symmetric census\n        template <int num_images>\n        class SymetricCensus:public ParallelLoopBody\n        {\n        private:\n            uint8_t *image[num_images];\n            int *dst[num_images];\n            int n2, width, height, im_num,stride_;\n        public:\n            SymetricCensus(const cv::Mat *img, int k2, int **distance)\n            {\n                for(int i = 0; i < num_images; i++)\n                {\n                    image[i] = img[i].data;\n                    dst[i] = distance[i];\n                }\n                n2 = k2;\n                width = img[0].cols;\n                height = img[0].rows;\n                im_num = num_images;\n                stride_ = (int)img[0].step;\n            }\n            void operator()(const cv::Range &r) const {\n                for (int i = r.start; i <= r.end ; i++)\n                {\n                    int distV = i*stride_;\n                    for (int j = n2; j <= width - n2; j++)\n                    {\n                        for(int d = 0; d < im_num; d++)\n                        {\n                            int c = 0;\n                            //the classic center symmetric census compares the current pixel with its symmetric counterpart, not with the center\n                            for (int ii = -n2; ii <= 0; ii++)\n                            {\n                                int rrWidth = (ii + i) * stride_;\n                                for (int jj = -n2; jj <= +n2; jj++)\n                                {\n                                    if (image[d][(rrWidth + (jj + j))] > image[d][((ii * (-1) + i) * width + (-1 * jj) + j)])\n                                    {\n                                        c = c + 1;\n                                    }\n                                    c = c * 2;\n                                    if(ii == 0 && jj < 0)\n                                    {\n                                        if (image[d][(i * width + (jj + j))] > image[d][(i * width + (-1 * jj) + j)])\n                                        {\n                                            c = c + 1;\n                                        }\n
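                                        // append this comparison as the next bit of the census signature\n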
                                        c = c * 2;\n                                    }\n                                }\n                            }\n                            dst[d][(distV + j)] = c;\n                        }\n                    }\n                }\n            }\n        };\n        /**\n        Two variations of census applied on the input images.\n        Implementation of a census transform which takes into account just some of the pixels from the census kernel, thus allowing for larger block sizes.\n        **/\n        //void applyCensusOnImages(const cv::Mat &im1,const cv::Mat &im2, int kernelSize, cv::Mat &dist, cv::Mat &dist2, const int type);\n        CV_EXPORTS void censusTransform(const cv::Mat &image1, const cv::Mat &image2, int kernelSize, cv::Mat &dist1, cv::Mat &dist2, const int type);\n        //single image census transform\n        CV_EXPORTS void censusTransform(const cv::Mat &image1, int kernelSize, cv::Mat &dist1, const int type);\n        /**\n        STANDARD_MCT - Modified census which memorizes 2 bits for each pixel and includes a tolerance in the pixel comparison\n        MCT_MEAN_VARIATION - Implementation of a modified census transform which also takes into account the variation from the mean of the window, not just the center pixel\n        **/\n        CV_EXPORTS void modifiedCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1,cv::Mat &dist2, const int type, int t = 0 , const cv::Mat &IntegralImage1 = cv::Mat::zeros(100,100,CV_8UC1), const cv::Mat &IntegralImage2 = cv::Mat::zeros(100,100,CV_8UC1));\n        //single version of modified census transform descriptor\n        CV_EXPORTS void modifiedCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist, const int type, int t = 0 ,const cv::Mat &IntegralImage = cv::Mat::zeros(100,100,CV_8UC1));\n        /**The classical center symmetric census.\n        A modified version of the CS census which compares a pixel with its correspondent mirrored about the center.\n        **/\n        CV_EXPORTS void symetricCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1, cv::Mat &dist2, const int type);\n        //single version of census transform\n        CV_EXPORTS void symetricCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist1, const int type);\n        //in a 9x9 kernel only certain positions are chosen\n        CV_EXPORTS void starCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1,cv::Mat &dist2);\n        //single image version of star kernel\n        CV_EXPORTS void starCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist);\n        //integral image computation used in the Mean Variation Census Transform\n        void imageMeanKernelSize(const cv::Mat &img, int windowSize, cv::Mat &c);\n
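        /* A hedged usage sketch (illustrative only; img1 and img2 are assumed to be CV_8UC1\n           rectified stereo images, and the descriptor matrices are assumed to be CV_32SC1):\n\n               cv::Mat c1(img1.size(), CV_32SC1), c2(img2.size(), CV_32SC1);\n               cv::stereo::censusTransform(img1, img2, 9, c1, c2, cv::stereo::CV_SPARSE_CENSUS);\n        */\n    }\n}\n#endif\n#endif\n/*End of file*/\n"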
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/stereo/matching.hpp",
    "content": "//By downloading, copying, installing or using the software you agree to this license.\n//If you do not agree to this license, do not download, install,\n//copy or use the software.\n//\n//\n//                          License Agreement\n//               For Open Source Computer Vision Library\n//                       (3-clause BSD License)\n//\n//Copyright (C) 2000-2015, Intel Corporation, all rights reserved.\n//Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n//Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.\n//Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.\n//Copyright (C) 2015, OpenCV Foundation, all rights reserved.\n//Copyright (C) 2015, Itseez Inc., all rights reserved.\n//Third party copyrights are property of their respective owners.\n//\n//Redistribution and use in source and binary forms, with or without modification,\n//are permitted provided that the following conditions are met:\n//\n//  * Redistributions of source code must retain the above copyright notice,\n//    this list of conditions and the following disclaimer.\n//\n//  * Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n//\n//  * Neither the names of the copyright holders nor the names of the contributors\n//    may be used to endorse or promote products derived from this software\n//    without specific prior written permission.\n//\n//This software is provided by the copyright holders and contributors \"as is\" and\n//any express or implied warranties, including, but not limited to, the implied\n//warranties of merchantability and fitness for a particular purpose are disclaimed.\n//In no event shall copyright holders or contributors be liable for any direct,\n//indirect, incidental, special, exemplary, or consequential damages\n//(including, but not limited to, procurement of substitute goods or services;\n//loss of use, data, or profits; or business interruption) however caused\n//and on any theory of liability, whether in contract, strict liability,\n//or tort (including negligence or otherwise) arising in any way out of\n//the use of this software, even if advised of the possibility of such damage.\n\n/*****************************************************************************************************************\\\n*   The interface contains the main methods for computing the matching between the left and right images\t      *\n*                                                                                                                 *\n\\******************************************************************************************************************/\n#include <stdint.h>\n\n#ifndef _OPENCV_MATCHING_HPP_\n#define _OPENCV_MATCHING_HPP_\n#ifdef __cplusplus\n\nnamespace cv\n{\n    namespace stereo\n    {\n        class Matching\n        {\n        private:\n            //!The maximum disparity\n            int maxDisparity;\n            //!the factor by which we are multiplying the disparity\n            int scallingFactor;\n            //!the confidence to which a min disparity found is good or not\n            double confidenceCheck;\n            //!the LUT used in case SSE is not available\n            int hamLut[65537];\n            //!function used for getting the minimum disparity from the cost volume\"\n            static int minim(short *c, int iwpj, 
 int widthDisp,const double confidence, const int search_region)\n            {\n                double mini, mini2, mini3;\n                mini = mini2 = mini3 = DBL_MAX;\n                int index = 0;\n                int iw = iwpj;\n                int widthDisp2;\n                widthDisp2 = widthDisp;\n                widthDisp -= 1;\n                for (int i = 0; i <= widthDisp; i++)\n                {\n                    if (c[(iw + i * search_region) * widthDisp2 + i] < mini)\n                    {\n                        mini3 = mini2;\n                        mini2 = mini;\n                        mini = c[(iw + i * search_region) * widthDisp2 + i];\n                        index = i;\n                    }\n                    else if (c[(iw + i * search_region) * widthDisp2 + i] < mini2)\n                    {\n                        mini3 = mini2;\n                        mini2 = c[(iw + i * search_region) * widthDisp2 + i];\n                    }\n                    else if (c[(iw + i * search_region) * widthDisp2 + i] < mini3)\n                    {\n                        mini3 = c[(iw + i * search_region) * widthDisp2 + i];\n                    }\n                }\n                if(mini != 0)\n                {\n                    if (mini3 / mini <= confidence)\n                        return index;\n                }\n                return -1;\n            }\n            //!Interpolate in order to obtain better results\n            //!function for refining the disparity at sub-pixel level using symmetric V interpolation\n            static double symetricVInterpolation(short *c, int iwjp, int widthDisp, int winDisp,const int search_region)\n            {\n                if (winDisp == 0 || winDisp == widthDisp - 1)\n                    return winDisp;\n                double m2m1, m3m1, m3, m2, m1;\n                m2 = c[(iwjp + (winDisp - 1) * search_region) * widthDisp + winDisp - 1];\n                m3 = c[(iwjp + (winDisp + 1) * search_region)* widthDisp + winDisp + 1];\n                m1 = c[(iwjp + winDisp * search_region) * widthDisp + winDisp];\n                m2m1 = m2 - m1;\n                m3m1 = m3 - m1;\n                if (m2m1 == 0 || m3m1 == 0) return winDisp;\n                double p;\n                p = 0;\n                if (m2 > m3)\n                {\n                    p = (0.5 - 0.25 * ((m3m1 * m3m1) / (m2m1 * m2m1) + (m3m1 / m2m1)));\n                }\n                else\n                {\n                    p = -1 * (0.5 - 0.25 * ((m2m1 * m2m1) / (m3m1 * m3m1) + (m2m1 / m3m1)));\n                }\n                if (p >= -0.5 && p <= 0.5)\n                    p = winDisp + p;\n                return p;\n            }\n            //!a preprocessing function that generates the Hamming LUT in case the algorithm is ever used on a platform where SSE is not available\n            void hammingLut()\n            {\n                for (int i = 0; i <= 65536; i++)\n                {\n                    int dist = 0;\n                    int j = i;\n                    //count the set bits of the number\n                    while (j)\n                    {\n                        dist = dist + 1;\n                        j = j & (j - 1);\n                    }\n                    hamLut[i] = dist;\n                }\n            }\n            //!the class used in computing the Hamming distance\n            class hammingDistance : public ParallelLoopBody\n            {\n            private:\n                int *left, *right;\n
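                // census-transformed input images; c is the cost volume (one short per pixel per disparity)\n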
                short *c;\n                int v,kernelSize, width;\n                int MASK;\n                int *hammLut;\n            public :\n                hammingDistance(const Mat &leftImage, const Mat &rightImage, short *cost, int maxDisp, int kerSize, int *hammingLUT):\n                    left((int *)leftImage.data), right((int *)rightImage.data), c(cost), v(maxDisp),kernelSize(kerSize),width(leftImage.cols), MASK(65535), hammLut(hammingLUT){}\n                void operator()(const cv::Range &r) const {\n                    for (int i = r.start; i <= r.end ; i++)\n                    {\n                        int iw = i * width;\n                        for (int j = kernelSize; j < width - kernelSize; j++)\n                        {\n                            int j2;\n                            int xorul;\n                            int iwj;\n                            iwj = iw + j;\n                            for (int d = 0; d <= v; d++)\n                            {\n                                j2 = (0 > j - d) ? (0) : (j - d);\n                                xorul = left[(iwj)] ^ right[(iw + j2)];\n#if CV_SSE4_1\n                                c[(iwj)* (v + 1) + d] = (short)_mm_popcnt_u32(xorul);\n#else\n                                c[(iwj)* (v + 1) + d] = (short)(hammLut[xorul & MASK] + hammLut[(xorul >> 16) & MASK]);\n#endif\n                            }\n                        }\n                    }\n                }\n            };\n            //!cost aggregation\n            class agregateCost:public ParallelLoopBody\n            {\n            private:\n                int win;\n                short *c, *parSum;\n                int maxDisp,width, height;\n            public:\n                agregateCost(const Mat &partialSums, int windowSize, int maxDispa, Mat &cost)\n                {\n                    win = windowSize / 2;\n                    c = (short *)cost.data;\n                    maxDisp = maxDispa;\n                    width = cost.cols / ( maxDisp + 1) - 1;\n                    height = cost.rows - 1;\n                    parSum = (short *)partialSums.data;\n                }\n                void operator()(const cv::Range &r) const {\n                    for (int i = r.start; i <= r.end; i++)\n                    {\n                        int iwi = (i - 1) * width;\n                        for (int j = win + 1; j <= width - win - 1; j++)\n                        {\n                            int w1 = ((i + win + 1) * width + j + win) * (maxDisp + 1);\n                            int w2 = ((i - win) * width + j - win - 1) * (maxDisp + 1);\n                            int w3 = ((i + win + 1) * width + j - win - 1) * (maxDisp + 1);\n                            int w4 = ((i - win) * width + j + win) * (maxDisp + 1);\n                            int w = (iwi + j - 1) * (maxDisp + 1);\n                            for (int d = 0; d <= maxDisp; d++)\n                            {\n                                c[w + d] = parSum[w1 + d] + parSum[w2 + d]\n                                - parSum[w3 + d] - parSum[w4 + d];\n                            }\n                        }\n                    }\n                }\n            };\n            //!class that is responsible for generating the disparity map\n            class makeMap:public ParallelLoopBody\n            {\n            private:\n                //enum used to indicate whether we are searching on the vertical (lr) or on the diagonal (rl)\n                enum {CV_VERTICAL_SEARCH,
 CV_DIAGONAL_SEARCH};\n                int width,disparity,scallingFact,th;\n                double confCheck;\n                uint8_t *map;\n                short *c;\n            public:\n                makeMap(const Mat &costVolume, int threshold, int maxDisp, double confidence,int scale, Mat &mapFinal)\n                {\n                    c = (short *)costVolume.data;\n                    map = mapFinal.data;\n                    disparity = maxDisp;\n                    width = costVolume.cols / ( disparity + 1) - 1;\n                    th = threshold;\n                    scallingFact = scale;\n                    confCheck = confidence;\n                }\n                void operator()(const cv::Range &r) const {\n                    for (int i = r.start; i <= r.end ; i++)\n                    {\n                        int lr;\n                        int v = -1;\n                        double p1, p2;\n                        int iw = i * width;\n                        for (int j = 0; j < width; j++)\n                        {\n                            lr = Matching:: minim(c, iw + j, disparity + 1, confCheck,CV_VERTICAL_SEARCH);\n                            if (lr != -1)\n                            {\n                                v = Matching::minim(c, iw + j - lr, disparity + 1, confCheck,CV_DIAGONAL_SEARCH);\n                                if (v != -1)\n                                {\n                                    p1 = Matching::symetricVInterpolation(c, iw + j - lr, disparity + 1, v,CV_DIAGONAL_SEARCH);\n                                    p2 = Matching::symetricVInterpolation(c, iw + j, disparity + 1, lr,CV_VERTICAL_SEARCH);\n                                    if (abs(p1 - p2) <= th)\n                                        map[iw + j] = (uint8_t)((p2)* scallingFact);\n                                    else\n                                    {\n                                        map[iw + j] = 0;\n                                    }\n                                }\n                                else\n                                {\n                                    if (width - j <= disparity)\n                                    {\n                                        p2 = Matching::symetricVInterpolation(c, iw + j, disparity + 1, lr,CV_VERTICAL_SEARCH);\n                                        map[iw + j] = (uint8_t)(p2* scallingFact);\n                                    }\n                                }\n                            }\n                            else\n                            {\n                                map[iw + j] = 0;\n                            }\n                        }\n                    }\n                }\n            };\n            //!median 1x9 parallelized filter\n            template <typename T>\n            class Median1x9:public ParallelLoopBody\n            {\n            private:\n                T *original;\n                T *filtered;\n                int height, width;\n            public:\n                Median1x9(const Mat &originalImage, Mat &filteredImage)\n                {\n                    original = (T *)originalImage.data;\n                    filtered = (T *)filteredImage.data;\n                    height = originalImage.rows;\n                    width = originalImage.cols;\n                }\n                void operator()(const cv::Range &r) const{\n                    for (int m = r.start; m <= r.end; m++)\n                    {\n
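                        // for each row, slide a 9-element horizontal window and write out its median\n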
                        for (int n = 4; n < width - 4; ++n)\n                        {\n                            int k = 0;\n                            T window[9];\n                            for (int i = n - 4; i <= n + 4; ++i)\n                                window[k++] = original[m * width + i];\n                            for (int j = 0; j < 5; ++j)\n                            {\n                                int min = j;\n                                for (int l = j + 1; l < 9; ++l)\n                                    if (window[l] < window[min])\n                                        min = l;\n                                const T temp = window[j];\n                                window[j] = window[min];\n                                window[min] = temp;\n                            }\n                            filtered[m  * width + n] = window[4];\n                        }\n                    }\n                }\n            };\n            //!median 9x1 parallelized filter\n            template <typename T>\n            class Median9x1:public ParallelLoopBody\n            {\n            private:\n                T *original;\n                T *filtered;\n                int height, width;\n            public:\n                Median9x1(const Mat &originalImage, Mat &filteredImage)\n                {\n                    original = (T *)originalImage.data;\n                    filtered = (T *)filteredImage.data;\n                    height = originalImage.rows;\n                    width = originalImage.cols;\n                }\n                void operator()(const Range &r) const{\n                    for (int n = r.start; n <= r.end; ++n)\n                    {\n                        for (int m = 4; m < height - 4; ++m)\n                        {\n                            int k = 0;\n                            T window[9];\n                            for (int i = m - 4; i <= m + 4; ++i)\n                                window[k++] = original[i * width + n];\n                            for (int j = 0; j < 5; j++)\n                            {\n                                int min = j;\n                                for (int l = j + 1; l < 9; ++l)\n                                    if (window[l] < window[min])\n                                        min = l;\n                                const T temp = window[j];\n                                window[j] = window[min];\n                                window[min] = temp;\n                            }\n                            filtered[m  * width + n] = window[4];\n                        }\n                    }\n                }\n            };\n        protected:\n            //arrays used in the region removal\n            Mat speckleY;\n            Mat speckleX;\n            Mat puss;\n            //int *specklePointX;\n            //int *specklePointY;\n            //long long *pus;\n            int previous_size;\n            //!method for setting the maximum disparity\n            void setMaxDisparity(int val)\n            {\n                CV_Assert(val > 10);\n                this->maxDisparity = val;\n            }\n            //!method for getting the maximum disparity\n            int getMaxDisparity()\n            {\n                return this->maxDisparity;\n            }\n            //!
 a number by which the disparity will be multiplied for better display\n            void setScallingFactor(int val)\n            {\n                CV_Assert(val > 0);\n                this->scallingFactor = val;\n            }\n            //!method for getting the scaling factor\n            int getScallingFactor()\n            {\n                return scallingFactor;\n            }\n            //!setter for the confidence check\n            void setConfidence(double val)\n            {\n                CV_Assert(val >= 1);\n                this->confidenceCheck = val;\n            }\n            //!getter for the confidence check\n            double getConfidence()\n            {\n                return confidenceCheck;\n            }\n            //! Hamming distance computation method\n            //! leftImage and rightImage are the two transformed images\n            //! the cost is the resulting cost volume and kernelSize is the size of the matching window\n            void hammingDistanceBlockMatching(const Mat &leftImage, const Mat &rightImage, Mat &cost, const int kernelSize= 9)\n            {\n                CV_Assert(leftImage.cols == rightImage.cols);\n                CV_Assert(leftImage.rows == rightImage.rows);\n                CV_Assert(kernelSize % 2 != 0);\n                CV_Assert(cost.rows == leftImage.rows);\n                CV_Assert(cost.cols / (maxDisparity + 1) == leftImage.cols);\n                short *c = (short *)cost.data;\n                memset(c, 0, sizeof(c[0]) * leftImage.cols * leftImage.rows * (maxDisparity + 1));\n                parallel_for_(cv::Range(kernelSize / 2,leftImage.rows - kernelSize / 2), hammingDistance(leftImage,rightImage,(short *)cost.data,maxDisparity,kernelSize / 2,hamLut));\n            }\n            //preprocessing the cost volume in order to get it ready for aggregation\n            void costGathering(const Mat &hammingDistanceCost, Mat &cost)\n            {\n                CV_Assert(cost.rows == hammingDistanceCost.rows);\n                CV_Assert(hammingDistanceCost.type() == CV_16S);\n                CV_Assert(cost.type() == CV_16S);\n                int maxDisp = maxDisparity;\n                int width = cost.cols / ( maxDisp + 1) - 1;\n                int height = cost.rows - 1;\n                short *c = (short *)cost.data;\n                short *ham = (short *)hammingDistanceCost.data;\n                memset(c, 0, sizeof(c[0]) * (width + 1) * (height + 1) * (maxDisp + 1));\n                for (int i = 1; i <= height; i++)\n                {\n                    int iw = i * width;\n                    int iwi = (i - 1) * width;\n                    for (int j = 1; j <= width; j++)\n                    {\n                        int iwj = (iw + j) * (maxDisp + 1);\n                        int iwjmu = (iw + j - 1) * (maxDisp + 1);\n                        int iwijmu = (iwi + j - 1) * (maxDisp + 1);\n                        for (int d = 0; d <= maxDisp; d++)\n                        {\n                            c[iwj + d] = ham[iwijmu + d] + c[iwjmu + d];\n                        }\n                    }\n                }\n                for (int i = 1; i <= height; i++)\n                {\n                    for (int j = 1; j <= width; j++)\n                    {\n                        int iwj = (i * width + j) * (maxDisp + 1);\n                        int iwjmu = ((i - 1)  * width + j) * (maxDisp + 1);\n                        for (int d = 0; d <= maxDisp; d++)\n                        {\n
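                            // add the partial sums from the row above, completing the 2-D prefix sums of the cost volume\n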
                            c[iwj + d] += c[iwjmu + d];\n                        }\n                    }\n                }\n            }\n            //!The aggregation on the cost volume\n            void blockAgregation(const Mat &partialSums, int windowSize, Mat &cost)\n            {\n                CV_Assert(windowSize % 2 != 0);\n                CV_Assert(partialSums.rows == cost.rows);\n                CV_Assert(partialSums.cols == cost.cols);\n                int win = windowSize / 2;\n                short *c = (short *)cost.data;\n                int maxDisp = maxDisparity;\n                int width = cost.cols / ( maxDisp + 1) - 1;\n                int height = cost.rows - 1;\n                memset(c, 0, sizeof(c[0]) * width * height * (maxDisp + 1));\n                parallel_for_(cv::Range(win + 1,height - win - 1), agregateCost(partialSums,windowSize,maxDisp,cost));\n            }\n            //!remove small regions that have an area smaller than t; we fill the region with the average of the good pixels around it\n            template <typename T>\n            void smallRegionRemoval(const Mat &currentMap, int t, Mat &out)\n            {\n                CV_Assert(currentMap.cols == out.cols);\n                CV_Assert(currentMap.rows == out.rows);\n                CV_Assert(t >= 0);\n                int *pus = (int *)puss.data;\n                int *specklePointX = (int *)speckleX.data;\n                int *specklePointY = (int *)speckleY.data;\n                memset(pus, 0, previous_size * sizeof(pus[0]));\n                T *map = (T *)currentMap.data;\n                T *outputMap = (T *)out.data;\n                int height = currentMap.rows;\n                int width = currentMap.cols;\n                T k = 1;\n                int st, dr;\n                int di[] = { -1, -1, -1, 0, 1, 1, 1, 0 },\n                    dj[] = { -1, 0, 1, 1, 1, 0, -1, -1 };\n                int speckle_size = 0;\n                st = 0;\n                dr = 0;\n                for (int i = 1; i < height - 1; i++)\n                {\n                    int iw = i * width;\n                    for (int j = 1; j < width - 1; j++)\n                    {\n                        if (map[iw + j] != 0)\n                        {\n                            outputMap[iw + j] = map[iw + j];\n                        }\n                        else if (map[iw + j] == 0)\n                        {\n                            T nr = 1;\n                            T avg = 0;\n                            speckle_size = dr;\n                            specklePointX[dr] = i;\n                            specklePointY[dr] = j;\n                            pus[i * width + j] = 1;\n                            dr++;\n                            map[iw + j] = k;\n                            while (st < dr)\n                            {\n                                int ii = specklePointX[st];\n                                int jj = specklePointY[st];\n                                //going on 8 directions\n                                for (int d = 0; d < 8; d++)\n                                {//if inside the image bounds\n                                    if (ii + di[d] >= 0 && ii + di[d] < height && jj + dj[d] >= 0 && jj + dj[d] < width &&\n                                        pus[(ii + di[d]) * width + jj + dj[d]] == 0)\n                                    {\n                                        T val = map[(ii + di[d]) * width + jj + dj[d]];\n                                        if (val ==
0)\n                                        {\n                                            map[(ii + di[d]) * width + jj + dj[d]] = k;\n                                            specklePointX[dr] = (ii + di[d]);\n                                            specklePointY[dr] = (jj + dj[d]);\n                                            dr++;\n                                            pus[(ii + di[d]) * width + jj + dj[d]] = 1;\n                                        }//this means the point is a good point to be used in computing the final filling value\n                                        else if (val >= 1 && val < 250)\n                                        {\n                                            avg += val;\n                                            nr++;\n                                        }\n                                    }\n                                }\n                                st++;\n                            }//if the hole size is smaller than the specified threshold, we fill the hole with the average of the good neighbours\n                            if (st - speckle_size <= t)\n                            {\n                                T fillValue = (T)(avg / nr);\n                                while (speckle_size < st)\n                                {\n                                    int ii = specklePointX[speckle_size];\n                                    int jj = specklePointY[speckle_size];\n                                    outputMap[ii * width + jj] = fillValue;\n                                    speckle_size++;\n                                }\n                            }\n                        }\n                    }\n                }\n            }\n            //!Method responsible for generating the disparity map\n            //!function for generating disparity maps at sub-pixel level\n            /* costVolume - represents the cost volume\n            * width, height - represent the width and height of the image\n            * disparity - represents the maximum disparity\n            * map - is the disparity map that will result\n            * th - is the LR threshold\n            */\n            void dispartyMapFormation(const Mat &costVolume, Mat &mapFinal, int th)\n            {\n                uint8_t *map = mapFinal.data;\n                int disparity = maxDisparity;\n                int width = costVolume.cols / ( disparity + 1) - 1;\n                int height = costVolume.rows - 1;\n                memset(map, 0, sizeof(map[0]) * width * height);\n                parallel_for_(Range(0,height - 1), makeMap(costVolume,th,disparity,confidenceCheck,scallingFactor,mapFinal));\n            }\n        public:\n            //!a median filter of 1x9 and 9x1\n            //!1x9 median filter\n            template<typename T>\n            void Median1x9Filter(const Mat &originalImage, Mat &filteredImage)\n            {\n                CV_Assert(originalImage.rows == filteredImage.rows);\n                CV_Assert(originalImage.cols == filteredImage.cols);\n                parallel_for_(Range(1,originalImage.rows - 2), Median1x9<T>(originalImage,filteredImage));\n            }\n            //!9x1 median filter\n            template<typename T>\n            void Median9x1Filter(const Mat &originalImage, Mat &filteredImage)\n            {\n                CV_Assert(originalImage.rows == filteredImage.rows);\n                CV_Assert(originalImage.cols == filteredImage.cols);\n                
parallel_for_(Range(1,originalImage.cols - 2), Median9x1<T>(originalImage,filteredImage));\n            }\n            //!default constructor for the matching class; it only builds the Hamming distance lookup table\n            Matching(void)\n            {\n                hammingLut();\n            }\n            ~Matching(void)\n            {\n            }\n            //constructor for the matching class\n            //maxDisp - represents the maximum disparity\n            //scalling - represents the scaling factor for the disparity values\n            //confidence - represents the confidence check\n            Matching(int maxDisp, int scalling = 4, int confidence = 6)\n            {\n                //set the maximum disparity\n                setMaxDisparity(maxDisp);\n                //set the scaling factor\n                setScallingFactor(scalling);\n                //set the value for the confidence\n                setConfidence(confidence);\n                //generate the hamming lut in case SSE is not available\n                hammingLut();\n            }\n        };\n    }\n}\n#endif\n#endif\n/*End of file*/\n"
  },
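The `smallRegionRemoval` method above is a breadth-first flood fill: zero-valued (invalid) disparity pixels are grouped into connected holes, and any hole whose area is at most `t` is painted with the average of the valid disparities met on its rim. Below is a standalone re-sketch of the same idea under simplifying assumptions (the helper `fillSmallHoles` is hypothetical; it works on a plain `CV_8U` disparity `Mat`, drops the class's reusable speckle buffers, and accepts any non-zero rim value instead of the `[1, 250)` band used above):

```cpp
#include <opencv2/core.hpp>
#include <vector>

// Fill each connected hole of zero-disparity pixels whose area is <= maxArea
// with the average of the valid (non-zero) pixels touching its border.
static void fillSmallHoles(cv::Mat &disp /* CV_8U */, int maxArea)
{
    const int di[] = { -1, -1, -1, 0, 1, 1, 1, 0 };   // 8-connected neighbourhood
    const int dj[] = { -1, 0, 1, 1, 1, 0, -1, -1 };
    cv::Mat visited = cv::Mat::zeros(disp.size(), CV_8U);
    for (int i = 1; i < disp.rows - 1; i++)
        for (int j = 1; j < disp.cols - 1; j++)
        {
            if (disp.at<uchar>(i, j) != 0 || visited.at<uchar>(i, j))
                continue;
            std::vector<cv::Point> hole(1, cv::Point(j, i));  // BFS queue of hole pixels
            visited.at<uchar>(i, j) = 1;
            int sum = 0, nr = 0;                              // rim statistics
            for (size_t q = 0; q < hole.size(); q++)
                for (int d = 0; d < 8; d++)
                {
                    int ii = hole[q].y + di[d], jj = hole[q].x + dj[d];
                    if (ii < 0 || ii >= disp.rows || jj < 0 || jj >= disp.cols)
                        continue;                             // outside the image
                    uchar v = disp.at<uchar>(ii, jj);
                    if (v == 0 && !visited.at<uchar>(ii, jj))
                    {
                        visited.at<uchar>(ii, jj) = 1;        // grow the hole
                        hole.push_back(cv::Point(jj, ii));
                    }
                    else if (v != 0)
                    {
                        sum += v;                             // valid rim pixel
                        nr++;
                    }
                }
            if ((int)hole.size() <= maxArea && nr > 0)        // small hole: paint it
                for (size_t q = 0; q < hole.size(); q++)
                    disp.at<uchar>(hole[q].y, hole[q].x) = (uchar)(sum / nr);
        }
}
```

The class version additionally reuses preallocated `speckleX`/`speckleY`/`puss` buffers across calls and writes into a separate output map, which is why it tracks `st`/`dr` indices instead of a local queue.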
  {
    "path": "src/3rdparty/opencv/include/opencv2/stereo/stereo.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/stereo.hpp\"\n\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/stereo.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STEREO_HPP__\n#define __OPENCV_STEREO_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/core/affine.hpp\"\n#include \"opencv2/stereo/descriptor.hpp\"\n#include \"opencv2/stereo/matching.hpp\"\n\n/**\n@defgroup stereo Stereo Correspondance Algorithms\n\n*/\n\nnamespace cv\n{\n    namespace stereo\n    {\n        //! @addtogroup stereo\n        //! @{\n        //\t\t void correctMatches( InputArray F, InputArray points1, InputArray points2,\n        //\tOutputArray newPoints1, OutputArray newPoints2 );\n        /** @brief Filters off small noise blobs (speckles) in the disparity map\n        @param img The input 16-bit signed disparity image\n        @param newVal The disparity value used to paint-off the speckles\n        @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not\n        affected by the algorithm\n        @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same\n        blob. 
Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point\n        disparity map, where disparity values are multiplied by 16, this scale factor should be taken into\n        account when specifying this parameter value.\n        @param buf The optional temporary buffer to avoid memory allocation within the function.\n        */\n        /** @brief The base class for stereo correspondence algorithms.\n        */\n        class StereoMatcher : public Algorithm\n        {\n        public:\n            enum { DISP_SHIFT = 4,\n                DISP_SCALE = (1 << DISP_SHIFT)\n            };\n\n            /** @brief Computes disparity map for the specified stereo pair\n\n            @param left Left 8-bit single-channel image.\n            @param right Right image of the same size and the same type as the left one.\n            @param disparity Output disparity map. It has the same size as the input images. Some algorithms,\n            like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value\n            has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.\n            */\n            virtual void compute( InputArray left, InputArray right,\n                OutputArray disparity ) = 0;\n\n            virtual int getMinDisparity() const = 0;\n            virtual void setMinDisparity(int minDisparity) = 0;\n\n            virtual int getNumDisparities() const = 0;\n            virtual void setNumDisparities(int numDisparities) = 0;\n\n            virtual int getBlockSize() const = 0;\n            virtual void setBlockSize(int blockSize) = 0;\n\n            virtual int getSpeckleWindowSize() const = 0;\n            virtual void setSpeckleWindowSize(int speckleWindowSize) = 0;\n\n            virtual int getSpeckleRange() const = 0;\n            virtual void setSpeckleRange(int speckleRange) = 0;\n\n            virtual int getDisp12MaxDiff() const = 0;\n            virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0;\n\n        };\n        //!speckle removal algorithms. These algorithms have the purpose of removing small regions\n        enum {\n            CV_SPECKLE_REMOVAL_ALGORITHM, CV_SPECKLE_REMOVAL_AVG_ALGORITHM\n        };\n        //!subpixel interpolation methods for disparities.\n        enum{\n            CV_QUADRATIC_INTERPOLATION, CV_SIMETRICV_INTERPOLATION\n        };\n        /** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and\n        contributed to OpenCV by K. 
Konolige.\n        */\n        class StereoBinaryBM : public StereoMatcher\n        {\n        public:\n            enum { PREFILTER_NORMALIZED_RESPONSE = 0,\n                PREFILTER_XSOBEL              = 1\n            };\n\n            virtual int getPreFilterType() const = 0;\n            virtual void setPreFilterType(int preFilterType) = 0;\n\n            virtual int getPreFilterSize() const = 0;\n            virtual void setPreFilterSize(int preFilterSize) = 0;\n\n            virtual int getPreFilterCap() const = 0;\n            virtual void setPreFilterCap(int preFilterCap) = 0;\n\n            virtual int getTextureThreshold() const = 0;\n            virtual void setTextureThreshold(int textureThreshold) = 0;\n\n            virtual int getUniquenessRatio() const = 0;\n            virtual void setUniquenessRatio(int uniquenessRatio) = 0;\n\n            virtual int getSmallerBlockSize() const = 0;\n            virtual void setSmallerBlockSize(int blockSize) = 0;\n\n            virtual int getScalleFactor() const = 0 ;\n            virtual void setScalleFactor(int factor) = 0;\n\n            virtual int getSpekleRemovalTechnique() const = 0 ;\n            virtual void setSpekleRemovalTechnique(int factor) = 0;\n\n            virtual bool getUsePrefilter() const = 0 ;\n            virtual void setUsePrefilter(bool factor) = 0;\n\n            virtual int getBinaryKernelType() const = 0;\n            virtual void setBinaryKernelType(int value) = 0;\n\n            virtual int getAgregationWindowSize() const = 0;\n            virtual void setAgregationWindowSize(int value) = 0;\n            /** @brief Creates StereoBM object\n\n            @param numDisparities the disparity search range. For each pixel the algorithm will find the best\n            disparity from 0 (default minimum disparity) to numDisparities. The search range can then be\n            shifted by changing the minimum disparity.\n            @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd\n            (as the block is centered at the current pixel). Larger block size implies smoother, though less\n            accurate disparity map. Smaller block size gives more detailed disparity map, but there is higher\n            chance for the algorithm to find a wrong correspondence.\n\n            The function creates a StereoBM object. You can then call StereoBM::compute() to compute disparity for\n            a specific stereo pair.\n            */\n            CV_EXPORTS static Ptr< cv::stereo::StereoBinaryBM > create(int numDisparities = 0, int blockSize = 9);\n        };\n\n        /** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original\n        one as follows:\n\n        -   By default, the algorithm is single-pass, which means that you consider only 5 directions\n        instead of 8. Set mode=StereoSGBM::MODE_HH in createStereoSGBM to run the full variant of the\n        algorithm but beware that it may consume a lot of memory.\n        -   The algorithm matches blocks, not individual pixels. Though, setting blockSize=1 reduces the\n        blocks to single pixels.\n        -   Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi\n        sub-pixel metric from @cite BT98 is used. Though, color images are supported as well.\n        -   Some pre- and post- processing steps from K. 
Konolige's StereoBM algorithm are included, for\n        example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness\n        check, quadratic interpolation and speckle filtering).\n\n        @note\n        -   (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found\n        at opencv_source_code/samples/python2/stereo_match.py\n        */\n        class StereoBinarySGBM : public StereoMatcher\n        {\n        public:\n            enum\n            {\n                MODE_SGBM = 0,\n                MODE_HH   = 1\n            };\n\n            virtual int getPreFilterCap() const = 0;\n            virtual void setPreFilterCap(int preFilterCap) = 0;\n\n            virtual int getUniquenessRatio() const = 0;\n            virtual void setUniquenessRatio(int uniquenessRatio) = 0;\n\n            virtual int getP1() const = 0;\n            virtual void setP1(int P1) = 0;\n\n            virtual int getP2() const = 0;\n            virtual void setP2(int P2) = 0;\n\n            virtual int getMode() const = 0;\n            virtual void setMode(int mode) = 0;\n\n            virtual int getSpekleRemovalTechnique() const = 0 ;\n            virtual void setSpekleRemovalTechnique(int factor) = 0;\n\n            virtual int getBinaryKernelType() const = 0;\n            virtual void setBinaryKernelType(int value) = 0;\n\n            virtual int getSubPixelInterpolationMethod() const = 0;\n            virtual void setSubPixelInterpolationMethod(int value) = 0;\n\n            /** @brief Creates StereoSGBM object\n\n            @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes\n            rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.\n            @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than\n            zero. In the current implementation, this parameter must be divisible by 16.\n            @param blockSize Matched block size. It must be an odd number \>=1 . Normally, it should be\n            somewhere in the 3..11 range.\n            @param P1 The first parameter controlling the disparity smoothness. This parameter is used for the case of slanted surfaces (not fronto-parallel).\n            @param P2 The second parameter controlling the disparity smoothness. This parameter is used for \"solving\" the depth discontinuities problem.\n            The larger the values are, the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1\n            between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor\n            pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good\n            P1 and P2 values are shown (like 8\*number_of_image_channels\*SADWindowSize\*SADWindowSize and\n            32\*number_of_image_channels\*SADWindowSize\*SADWindowSize , respectively).\n            @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right\n            disparity check. Set it to a non-positive value to disable the check.\n            @param preFilterCap Truncation value for the prefiltered image pixels. 
The algorithm first\n            computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval.\n            The result values are passed to the Birchfield-Tomasi pixel cost function.\n            @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function\n            value should \"win\" the second best value to consider the found match correct. Normally, a value\n            within the 5-15 range is good enough.\n            @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles\n            and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the\n            50-200 range.\n            @param speckleRange Maximum disparity variation within each connected component. If you do speckle\n            filtering, set the parameter to a positive value, it will be implicitly multiplied by 16.\n            Normally, 1 or 2 is good enough.\n            @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming\n            algorithm. It will consume O(W\\*H\\*numDisparities) bytes, which is large for 640x480 stereo and\n            huge for HD-size pictures. By default, it is set to false .\n\n            The first constructor initializes StereoSGBM with all the default parameters. So, you only have to\n            set StereoSGBM::numDisparities at minimum. The second constructor enables you to set each parameter\n            to a custom value.\n            */\n            CV_EXPORTS static Ptr<cv::stereo::StereoBinarySGBM> create(int minDisparity, int numDisparities, int blockSize,\n                int P1 = 100, int P2 = 1000, int disp12MaxDiff = 1,\n                int preFilterCap = 0, int uniquenessRatio = 5,\n                int speckleWindowSize = 400, int speckleRange = 200,\n                int mode = StereoBinarySGBM::MODE_SGBM);\n        };\n        //! @}\n    }//stereo\n} // cv\n\n#endif\n"
  },
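A hypothetical usage sketch for the interfaces declared in this header (file names and parameter values are illustrative, and the input pair is assumed to be rectified). The one non-obvious step is undoing the 4-fractional-bit fixed-point encoding via `StereoMatcher::DISP_SCALE`:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/stereo.hpp>

int main()
{
    cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);

    // numDisparities must be divisible by 16; blockSize must be odd.
    cv::Ptr<cv::stereo::StereoBinarySGBM> sgbm =
        cv::stereo::StereoBinarySGBM::create(0 /*minDisparity*/, 64 /*numDisparities*/, 9 /*blockSize*/);

    cv::Mat disp16;
    sgbm->compute(left, right, disp16);   // 16-bit fixed point, 4 fractional bits

    cv::Mat dispF;                        // true disparities in pixels
    disp16.convertTo(dispF, CV_32F, 1.0 / cv::stereo::StereoMatcher::DISP_SCALE);
    return 0;
}
```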
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/autocalib.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_AUTOCALIB_HPP__\n#define __OPENCV_STITCHING_AUTOCALIB_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"matchers.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching_autocalib\n//! 
@{\n\n/** @brief Tries to estimate focal lengths from the given homography under the assumption that the camera\nundergoes rotations around its centre only.\n\n@param H Homography.\n@param f0 Estimated focal length along X axis.\n@param f1 Estimated focal length along Y axis.\n@param f0_ok True, if f0 was estimated successfully, false otherwise.\n@param f1_ok True, if f1 was estimated successfully, false otherwise.\n\nSee \"Construction of Panoramic Image Mosaics with Global and Local Alignment\"\nby Heung-Yeung Shum and Richard Szeliski.\n */\nvoid CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);\n\n/** @brief Estimates focal lengths for each given camera.\n\n@param features Features of images.\n@param pairwise_matches Matches between all image pairs.\n@param focals Estimated focal lengths for each camera.\n */\nvoid CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,\n                              const std::vector<MatchesInfo> &pairwise_matches,\n                              std::vector<double> &focals);\n\nbool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K);\n\n//! @} stitching_autocalib\n\n} // namespace detail\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_AUTOCALIB_HPP__\n"
  },
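Since focalsFromHomography can fail on either axis independently, callers are expected to check both flags; taking the geometric mean of the two per-axis estimates is a common convention. A minimal sketch (the wrapper estimateFocalFromH is hypothetical):

```cpp
#include <cmath>
#include <opencv2/core.hpp>
#include "opencv2/stitching/detail/autocalib.hpp"

// Returns a single focal-length estimate from a homography H between two
// views related by pure rotation, or -1.0 if the estimation failed.
double estimateFocalFromH(const cv::Mat &H)
{
    double f0 = 0.0, f1 = 0.0;
    bool f0_ok = false, f1_ok = false;
    cv::detail::focalsFromHomography(H, f0, f1, f0_ok, f1_ok);
    if (f0_ok && f1_ok) return std::sqrt(f0 * f1);  // geometric mean of both axes
    if (f0_ok) return f0;
    if (f1_ok) return f1;
    return -1.0;
}
```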
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/blenders.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_BLENDERS_HPP__\n#define __OPENCV_STITCHING_BLENDERS_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching_blend\n//! 
@{\n\n/** @brief Base class for all blenders.\n\nSimple blender which puts one image over another\n*/\nclass CV_EXPORTS Blender\n{\npublic:\n    virtual ~Blender() {}\n\n    enum { NO, FEATHER, MULTI_BAND };\n    static Ptr<Blender> createDefault(int type, bool try_gpu = false);\n\n    /** @brief Prepares the blender for blending.\n\n    @param corners Source images top-left corners\n    @param sizes Source image sizes\n     */\n    void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);\n    /** @overload */\n    virtual void prepare(Rect dst_roi);\n    /** @brief Processes the image.\n\n    @param img Source image\n    @param mask Source image mask\n    @param tl Source image top-left corners\n     */\n    virtual void feed(InputArray img, InputArray mask, Point tl);\n    /** @brief Blends and returns the final pano.\n\n    @param dst Final pano\n    @param dst_mask Final pano mask\n     */\n    virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);\n\nprotected:\n    UMat dst_, dst_mask_;\n    Rect dst_roi_;\n};\n\n/** @brief Simple blender which mixes images at its borders.\n */\nclass CV_EXPORTS FeatherBlender : public Blender\n{\npublic:\n    FeatherBlender(float sharpness = 0.02f);\n\n    float sharpness() const { return sharpness_; }\n    void setSharpness(float val) { sharpness_ = val; }\n\n    void prepare(Rect dst_roi);\n    void feed(InputArray img, InputArray mask, Point tl);\n    void blend(InputOutputArray dst, InputOutputArray dst_mask);\n\n    //! Creates weight maps for fixed set of source images by their masks and top-left corners.\n    //! Final image can be obtained by simple weighting of the source images.\n    Rect createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,\n                          std::vector<UMat> &weight_maps);\n\nprivate:\n    float sharpness_;\n    UMat weight_map_;\n    UMat dst_weight_map_;\n};\n\ninline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }\n\n/** @brief Blender which uses multi-band blending algorithm (see @cite BA83).\n */\nclass CV_EXPORTS MultiBandBlender : public Blender\n{\npublic:\n    MultiBandBlender(int try_gpu = false, int num_bands = 5, int weight_type = CV_32F);\n\n    int numBands() const { return actual_num_bands_; }\n    void setNumBands(int val) { actual_num_bands_ = val; }\n\n    void prepare(Rect dst_roi);\n    void feed(InputArray img, InputArray mask, Point tl);\n    void blend(InputOutputArray dst, InputOutputArray dst_mask);\n\nprivate:\n    int actual_num_bands_, num_bands_;\n    std::vector<UMat> dst_pyr_laplace_;\n    std::vector<UMat> dst_band_weights_;\n    Rect dst_roi_final_;\n    bool can_use_gpu_;\n    int weight_type_; //CV_32F or CV_16S\n};\n\n\n//////////////////////////////////////////////////////////////////////////////\n// Auxiliary functions\n\nvoid CV_EXPORTS normalizeUsingWeightMap(InputArray weight, InputOutputArray src);\n\nvoid CV_EXPORTS createWeightMap(InputArray mask, float sharpness, InputOutputArray weight);\n\nvoid CV_EXPORTS createLaplacePyr(InputArray img, int num_levels, std::vector<UMat>& pyr);\nvoid CV_EXPORTS createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat>& pyr);\n\n// Restores source image\nvoid CV_EXPORTS restoreImageFromLaplacePyr(std::vector<UMat>& pyr);\nvoid CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<UMat>& pyr);\n\n//! @}\n\n} // namespace detail\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_BLENDERS_HPP__\n"
  },
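The blender protocol declared above is prepare-once, feed-per-image, blend-once. A hedged sketch of driving MultiBandBlender with already-warped inputs (the surrounding variables are assumed to come from earlier warping steps; multi-band blending conventionally expects `CV_16SC3` images):

```cpp
#include <opencv2/core.hpp>
#include "opencv2/stitching/detail/blenders.hpp"
#include <vector>

cv::Mat blendPano(const std::vector<cv::Mat> &imgs,      // warped images (CV_16SC3)
                  const std::vector<cv::Mat> &masks,     // warped masks (CV_8U)
                  const std::vector<cv::Point> &corners, // top-left corners in pano space
                  const std::vector<cv::Size> &sizes)    // warped image sizes
{
    cv::detail::MultiBandBlender blender(false /*try_gpu*/, 5 /*num_bands*/);
    blender.prepare(corners, sizes);                 // allocate the destination ROI
    for (size_t i = 0; i < imgs.size(); ++i)
        blender.feed(imgs[i], masks[i], corners[i]); // accumulate one image per call
    cv::Mat pano, pano_mask;
    blender.blend(pano, pano_mask);                  // collapse pyramids into the result
    return pano;
}
```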
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/camera.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_CAMERA_HPP__\n#define __OPENCV_STITCHING_CAMERA_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching\n//! @{\n\n/** @brief Describes camera parameters.\n\n@note Translation is assumed to be zero during the whole stitching pipeline. :\n */\nstruct CV_EXPORTS CameraParams\n{\n    CameraParams();\n    CameraParams(const CameraParams& other);\n    const CameraParams& operator =(const CameraParams& other);\n    Mat K() const;\n\n    double focal; // Focal length\n    double aspect; // Aspect ratio\n    double ppx; // Principal point X\n    double ppy; // Principal point Y\n    Mat R; // Rotation\n    Mat t; // Translation\n};\n\n//! @}\n\n} // namespace detail\n} // namespace cv\n\n#endif // #ifndef __OPENCV_STITCHING_CAMERA_HPP__\n"
  },
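For reference, the intrinsic matrix that K() plausibly assembles from the fields above (a sketch of the assumed layout, not the library's actual implementation, which lives in the .cpp):

```cpp
#include <opencv2/core.hpp>

// K = | focal      0        ppx |
//     |   0   focal*aspect  ppy |
//     |   0        0         1  |
cv::Mat makeK(double focal, double aspect, double ppx, double ppy)
{
    cv::Mat K = cv::Mat::eye(3, 3, CV_64F);
    K.at<double>(0, 0) = focal;          // fx
    K.at<double>(1, 1) = focal * aspect; // fy
    K.at<double>(0, 2) = ppx;            // principal point X
    K.at<double>(1, 2) = ppy;            // principal point Y
    return K;
}
```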
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/exposure_compensate.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__\n#define __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching_exposure\n//! 
@{\n\n/** @brief Base class for all exposure compensators.\n */\nclass CV_EXPORTS ExposureCompensator\n{\npublic:\n    virtual ~ExposureCompensator() {}\n\n    enum { NO, GAIN, GAIN_BLOCKS };\n    static Ptr<ExposureCompensator> createDefault(int type);\n\n    /**\n    @param corners Source image top-left corners\n    @param images Source images\n    @param masks Image masks to update (second value in pair specifies the value which should be used\n    to detect where image is)\n     */\n    void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,\n              const std::vector<UMat> &masks);\n    /** @overload */\n    virtual void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,\n                      const std::vector<std::pair<UMat,uchar> > &masks) = 0;\n    /** @brief Compensate exposure in the specified image.\n\n    @param index Image index\n    @param corner Image top-left corner\n    @param image Image to process\n    @param mask Image mask\n     */\n    virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;\n};\n\n/** @brief Stub exposure compensator which does nothing.\n */\nclass CV_EXPORTS NoExposureCompensator : public ExposureCompensator\n{\npublic:\n    void feed(const std::vector<Point> &/*corners*/, const std::vector<UMat> &/*images*/,\n              const std::vector<std::pair<UMat,uchar> > &/*masks*/) { }\n    void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) { }\n};\n\n/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image\nintensities, see @cite BL07 and @cite WJ10 for details.\n */\nclass CV_EXPORTS GainCompensator : public ExposureCompensator\n{\npublic:\n    void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,\n              const std::vector<std::pair<UMat,uchar> > &masks);\n    void apply(int index, Point corner, InputOutputArray image, InputArray mask);\n    std::vector<double> gains() const;\n\nprivate:\n    Mat_<double> gains_;\n};\n\n/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block\nintensities, see @cite UES01 for details.\n */\nclass CV_EXPORTS BlocksGainCompensator : public ExposureCompensator\n{\npublic:\n    BlocksGainCompensator(int bl_width = 32, int bl_height = 32)\n            : bl_width_(bl_width), bl_height_(bl_height) {}\n    void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,\n              const std::vector<std::pair<UMat,uchar> > &masks);\n    void apply(int index, Point corner, InputOutputArray image, InputArray mask);\n\nprivate:\n    int bl_width_, bl_height_;\n    std::vector<UMat> gain_maps_;\n};\n\n//! @}\n\n} // namespace detail\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__\n"
  },
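Usage of the compensator interface above follows a feed-then-apply pattern: feed() sees all overlapping images at once to estimate gains, after which apply() rescales each image in place. A hypothetical sketch using the GAIN strategy:

```cpp
#include <opencv2/core.hpp>
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include <vector>

void compensateExposure(std::vector<cv::UMat> &images,        // warped images
                        const std::vector<cv::UMat> &masks,   // warped masks
                        const std::vector<cv::Point> &corners)
{
    cv::Ptr<cv::detail::ExposureCompensator> comp =
        cv::detail::ExposureCompensator::createDefault(cv::detail::ExposureCompensator::GAIN);
    comp->feed(corners, images, masks);   // estimate one gain per image from overlaps
    for (size_t i = 0; i < images.size(); ++i)
        comp->apply((int)i, corners[i], images[i], masks[i]); // rescale in place
}
```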
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/matchers.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_MATCHERS_HPP__\n#define __OPENCV_STITCHING_MATCHERS_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/features2d.hpp\"\n\n#include \"opencv2/opencv_modules.hpp\"\n\n#ifdef HAVE_OPENCV_XFEATURES2D\n#  include \"opencv2/xfeatures2d/cuda.hpp\"\n#endif\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching_match\n//! @{\n\n/** @brief Structure containing image keypoints and descriptors. */\nstruct CV_EXPORTS ImageFeatures\n{\n    int img_idx;\n    Size img_size;\n    std::vector<KeyPoint> keypoints;\n    UMat descriptors;\n};\n\n/** @brief Feature finders base class */\nclass CV_EXPORTS FeaturesFinder\n{\npublic:\n    virtual ~FeaturesFinder() {}\n    /** @overload */\n    void operator ()(InputArray image, ImageFeatures &features);\n    /** @brief Finds features in the given image.\n\n    @param image Source image\n    @param features Found features\n    @param rois Regions of interest\n\n    @sa detail::ImageFeatures, Rect_\n    */\n    void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);\n    /** @brief Frees unused memory allocated before if there is any. 
*/\n    virtual void collectGarbage() {}\n\nprotected:\n    /** @brief This method must implement features finding logic in order to make the wrappers\n    detail::FeaturesFinder::operator()_ work.\n\n    @param image Source image\n    @param features Found features\n\n    @sa detail::ImageFeatures */\n    virtual void find(InputArray image, ImageFeatures &features) = 0;\n};\n\n/** @brief SURF features finder.\n\n@sa detail::FeaturesFinder, SURF\n*/\nclass CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder\n{\npublic:\n    SurfFeaturesFinder(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4,\n                       int num_octaves_descr = /*4*/3, int num_layers_descr = /*2*/4);\n\nprivate:\n    void find(InputArray image, ImageFeatures &features);\n\n    Ptr<FeatureDetector> detector_;\n    Ptr<DescriptorExtractor> extractor_;\n    Ptr<Feature2D> surf;\n};\n\n/** @brief ORB features finder. :\n\n@sa detail::FeaturesFinder, ORB\n*/\nclass CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder\n{\npublic:\n    OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5);\n\nprivate:\n    void find(InputArray image, ImageFeatures &features);\n\n    Ptr<ORB> orb;\n    Size grid_size;\n};\n\n\n#ifdef HAVE_OPENCV_XFEATURES2D\nclass CV_EXPORTS SurfFeaturesFinderGpu : public FeaturesFinder\n{\npublic:\n    SurfFeaturesFinderGpu(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4,\n                          int num_octaves_descr = 4, int num_layers_descr = 2);\n\n    void collectGarbage();\n\nprivate:\n    void find(InputArray image, ImageFeatures &features);\n\n    cuda::GpuMat image_;\n    cuda::GpuMat gray_image_;\n    cuda::SURF_CUDA surf_;\n    cuda::GpuMat keypoints_;\n    cuda::GpuMat descriptors_;\n    int num_octaves_, num_layers_;\n    int num_octaves_descr_, num_layers_descr_;\n};\n#endif\n\n/** @brief Structure containing information about matches between two images.\n\nIt's assumed that there is a homography between those images.\n*/\nstruct CV_EXPORTS MatchesInfo\n{\n    MatchesInfo();\n    MatchesInfo(const MatchesInfo &other);\n    const MatchesInfo& operator =(const MatchesInfo &other);\n\n    int src_img_idx, dst_img_idx;       //!< Images indices (optional)\n    std::vector<DMatch> matches;\n    std::vector<uchar> inliers_mask;    //!< Geometrically consistent matches mask\n    int num_inliers;                    //!< Number of geometrically consistent matches\n    Mat H;                              //!< Estimated homography\n    double confidence;                  //!< Confidence two images are from the same panorama\n};\n\n/** @brief Feature matchers base class. 
*/\nclass CV_EXPORTS FeaturesMatcher\n{\npublic:\n    virtual ~FeaturesMatcher() {}\n\n    /** @overload\n    @param features1 First image features\n    @param features2 Second image features\n    @param matches_info Found matches\n    */\n    void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,\n                     MatchesInfo& matches_info) { match(features1, features2, matches_info); }\n\n    /** @brief Performs image matching.\n\n    @param features Features of the source images\n    @param pairwise_matches Found pairwise matches\n    @param mask Mask indicating which image pairs must be matched\n\n    The function is parallelized with the TBB library.\n\n    @sa detail::MatchesInfo\n    */\n    void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,\n                     const cv::UMat &mask = cv::UMat());\n\n    /** @return True, if it's possible to use the same matcher instance in parallel, false otherwise\n    */\n    bool isThreadSafe() const { return is_thread_safe_; }\n\n    /** @brief Frees unused memory allocated before if there is any.\n    */\n    virtual void collectGarbage() {}\n\nprotected:\n    FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}\n\n    /** @brief This method must implement matching logic in order to make the wrappers\n    detail::FeaturesMatcher::operator()_ work.\n\n    @param features1 first image features\n    @param features2 second image features\n    @param matches_info found matches\n     */\n    virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,\n                       MatchesInfo& matches_info) = 0;\n\n    bool is_thread_safe_;\n};\n\n/** @brief Features matcher which finds two best matches for each feature and leaves the best one only if the\nratio between descriptor distances is greater than the threshold match_conf\n\n@sa detail::FeaturesMatcher\n */\nclass CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher\n{\npublic:\n    /** @brief Constructs a \"best of 2 nearest\" matcher.\n\n    @param try_use_gpu Should try to use GPU or not\n    @param match_conf Match distances ratio threshold\n    @param num_matches_thresh1 Minimum number of matches required for the 2D projective transform\n    estimation used in the inliers classification step\n    @param num_matches_thresh2 Minimum number of matches required for the 2D projective transform\n    re-estimation on inliers\n     */\n    BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,\n                          int num_matches_thresh2 = 6);\n\n    void collectGarbage();\n\nprotected:\n    void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info);\n\n    int num_matches_thresh1_;\n    int num_matches_thresh2_;\n    Ptr<FeaturesMatcher> impl_;\n};\n\nclass CV_EXPORTS BestOf2NearestRangeMatcher : public BestOf2NearestMatcher\n{\npublic:\n    BestOf2NearestRangeMatcher(int range_width = 5, bool try_use_gpu = false, float match_conf = 0.3f,\n                            int num_matches_thresh1 = 6, int num_matches_thresh2 = 6);\n\n    void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,\n                     const cv::UMat &mask = cv::UMat());\n\n\nprotected:\n    int range_width_;\n};\n\n//! @} stitching_match\n\n} // namespace detail\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_MATCHERS_HPP__\n"
  },
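A sketch of the find-then-match flow implied by the classes above (the finder choice and thresholds are illustrative, not recommendations):

```cpp
#include <opencv2/core.hpp>
#include "opencv2/stitching/detail/matchers.hpp"
#include <vector>

void matchAllPairs(const std::vector<cv::Mat> &images,
                   std::vector<cv::detail::MatchesInfo> &pairwise_matches)
{
    cv::detail::OrbFeaturesFinder finder;   // SURF would require xfeatures2d
    std::vector<cv::detail::ImageFeatures> features(images.size());
    for (size_t i = 0; i < images.size(); ++i)
    {
        finder(images[i], features[i]);     // detect keypoints + compute descriptors
        features[i].img_idx = (int)i;
    }
    finder.collectGarbage();

    // Ratio test per the class doc: a match survives only if the best
    // distance is sufficiently smaller than the second best.
    cv::detail::BestOf2NearestMatcher matcher(false /*try_use_gpu*/, 0.3f /*match_conf*/);
    matcher(features, pairwise_matches);    // fills one MatchesInfo per image pair
    matcher.collectGarbage();
}
```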
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/motion_estimators.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__\n#define __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"matchers.hpp\"\n#include \"util.hpp\"\n#include \"camera.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching_rotation\n//! @{\n\n/** @brief Rotation estimator base class.\n\nIt takes features of all images, pairwise matches between all images and estimates rotations of all\ncameras.\n\n@note The coordinate system origin is implementation-dependent, but you can always normalize the\nrotations in respect to the first camera, for instance. 
:\n */\nclass CV_EXPORTS Estimator\n{\npublic:\n    virtual ~Estimator() {}\n\n    /** @brief Estimates camera parameters.\n\n    @param features Features of images\n    @param pairwise_matches Pairwise matches of images\n    @param cameras Estimated camera parameters\n    @return True in case of success, false otherwise\n     */\n    bool operator ()(const std::vector<ImageFeatures> &features,\n                     const std::vector<MatchesInfo> &pairwise_matches,\n                     std::vector<CameraParams> &cameras)\n        { return estimate(features, pairwise_matches, cameras); }\n\nprotected:\n    /** @brief This method must implement camera parameters estimation logic in order to make the wrapper\n    detail::Estimator::operator()_ work.\n\n    @param features Features of images\n    @param pairwise_matches Pairwise matches of images\n    @param cameras Estimated camera parameters\n    @return True in case of success, false otherwise\n     */\n    virtual bool estimate(const std::vector<ImageFeatures> &features,\n                          const std::vector<MatchesInfo> &pairwise_matches,\n                          std::vector<CameraParams> &cameras) = 0;\n};\n\n/** @brief Homography based rotation estimator.\n */\nclass CV_EXPORTS HomographyBasedEstimator : public Estimator\n{\npublic:\n    HomographyBasedEstimator(bool is_focals_estimated = false)\n        : is_focals_estimated_(is_focals_estimated) {}\n\nprivate:\n    virtual bool estimate(const std::vector<ImageFeatures> &features,\n                          const std::vector<MatchesInfo> &pairwise_matches,\n                          std::vector<CameraParams> &cameras);\n\n    bool is_focals_estimated_;\n};\n\n/** @brief Base class for all camera parameters refinement methods.\n */\nclass CV_EXPORTS BundleAdjusterBase : public Estimator\n{\npublic:\n    const Mat refinementMask() const { return refinement_mask_.clone(); }\n    void setRefinementMask(const Mat &mask)\n    {\n        CV_Assert(mask.type() == CV_8U && mask.size() == Size(3, 3));\n        refinement_mask_ = mask.clone();\n    }\n\n    double confThresh() const { return conf_thresh_; }\n    void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }\n\n    TermCriteria termCriteria() { return term_criteria_; }\n    void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }\n\nprotected:\n    /** @brief Construct a bundle adjuster base instance.\n\n    @param num_params_per_cam Number of parameters per camera\n    @param num_errs_per_measurement Number of error terms (components) per match\n     */\n    BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)\n        : num_params_per_cam_(num_params_per_cam),\n          num_errs_per_measurement_(num_errs_per_measurement)\n    {\n        setRefinementMask(Mat::ones(3, 3, CV_8U));\n        setConfThresh(1.);\n        setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 1000, DBL_EPSILON));\n    }\n\n    // Runs bundle adjustment\n    virtual bool estimate(const std::vector<ImageFeatures> &features,\n                          const std::vector<MatchesInfo> &pairwise_matches,\n                          std::vector<CameraParams> &cameras);\n\n    /** @brief Sets initial camera parameter to refine.\n\n    @param cameras Camera parameters\n     */\n    virtual void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) = 0;\n    /** @brief Gets the refined camera parameters.\n\n    @param cameras Refined camera parameters\n     */\n   
 virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;\n    /** @brief Calculates error vector.\n\n    @param err Error column-vector of length total_num_matches \* num_errs_per_measurement\n     */\n    virtual void calcError(Mat &err) = 0;\n    /** @brief Calculates the cost function jacobian.\n\n    @param jac Jacobian matrix of dimensions\n    (total_num_matches \* num_errs_per_measurement) x (num_images \* num_params_per_cam)\n     */\n    virtual void calcJacobian(Mat &jac) = 0;\n\n    // 3x3 8U mask, where 0 means don't refine respective parameter, != 0 means refine\n    Mat refinement_mask_;\n\n    int num_images_;\n    int total_num_matches_;\n\n    int num_params_per_cam_;\n    int num_errs_per_measurement_;\n\n    const ImageFeatures *features_;\n    const MatchesInfo *pairwise_matches_;\n\n    // Threshold to filter out poorly matched image pairs\n    double conf_thresh_;\n\n    //Levenberg–Marquardt algorithm termination criteria\n    TermCriteria term_criteria_;\n\n    // Camera parameters matrix (CV_64F)\n    Mat cam_params_;\n\n    // Connected image pairs\n    std::vector<std::pair<int,int> > edges_;\n};\n\n\n/** @brief Implementation of the camera parameters refinement algorithm which minimizes the sum of the squared\nreprojection errors\n\nIt can estimate focal length, aspect ratio, and principal point.\nThese are the only parameters the refinement mask can affect.\n */\nclass CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase\n{\npublic:\n    BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {}\n\nprivate:\n    void setUpInitialCameraParams(const std::vector<CameraParams> &cameras);\n    void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const;\n    void calcError(Mat &err);\n    void calcJacobian(Mat &jac);\n\n    Mat err1_, err2_;\n};\n\n\n/** @brief Implementation of the camera parameters refinement algorithm which minimizes the sum of the distances\nbetween the rays passing through the camera center and a feature. :\n\nIt can estimate focal length. It ignores the refinement mask for now.\n */\nclass CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase\n{\npublic:\n    BundleAdjusterRay() : BundleAdjusterBase(4, 3) {}\n\nprivate:\n    void setUpInitialCameraParams(const std::vector<CameraParams> &cameras);\n    void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const;\n    void calcError(Mat &err);\n    void calcJacobian(Mat &jac);\n\n    Mat err1_, err2_;\n};\n\n\nenum WaveCorrectKind\n{\n    WAVE_CORRECT_HORIZ,\n    WAVE_CORRECT_VERT\n};\n\n/** @brief Tries to make panorama more horizontal (or vertical).\n\n@param rmats Camera rotation matrices.\n@param kind Correction kind, see detail::WaveCorrectKind.\n */\nvoid CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind);\n\n\n//////////////////////////////////////////////////////////////////////////////\n// Auxiliary functions\n\n// Returns matches graph representation in DOT language\nString CV_EXPORTS matchesGraphAsString(std::vector<String> &pathes, std::vector<MatchesInfo> &pairwise_matches,\n                                            float conf_threshold);\n\nstd::vector<int> CV_EXPORTS leaveBiggestComponent(\n        std::vector<ImageFeatures> &features,\n        std::vector<MatchesInfo> &pairwise_matches,\n        float conf_threshold);\n\nvoid CV_EXPORTS findMaxSpanningTree(\n        int num_images, const std::vector<MatchesInfo> &pairwise_matches,\n        Graph &span_tree, std::vector<int> &centers);\n\n//! 
@} stitching_rotation\n\n} // namespace detail\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__\n"
  },
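  {
    "path": "docs/examples/rotation_estimation_sketch.cpp",
    "content": "// NOTE: illustrative sketch added by the editor; this file (and its path) is hypothetical\n// and not part of the vendored OpenCV sources. It shows how the classes declared in\n// motion_estimators.hpp are typically chained: a homography-based initial estimate,\n// bundle adjustment, then wave correction. The `features` and `pairwise_matches` inputs\n// are assumed to come from the feature finding and matching stages.\n#include <vector>\n#include \"opencv2/stitching/detail/motion_estimators.hpp\"\n\nusing namespace cv;\n\nbool estimateRotations(const std::vector<detail::ImageFeatures> &features,\n                       const std::vector<detail::MatchesInfo> &pairwise_matches,\n                       std::vector<detail::CameraParams> &cameras)\n{\n    // Initial camera parameters from pairwise homographies.\n    detail::HomographyBasedEstimator estimator;\n    if (!estimator(features, pairwise_matches, cameras))\n        return false;\n\n    // The bundle adjusters work on 32-bit float rotation matrices.\n    for (size_t i = 0; i < cameras.size(); ++i)\n    {\n        Mat R;\n        cameras[i].R.convertTo(R, CV_32F);\n        cameras[i].R = R;\n    }\n\n    // Refine with the ray-distance cost; image pairs whose match confidence\n    // falls below the threshold are ignored.\n    Ptr<detail::BundleAdjusterBase> adjuster = makePtr<detail::BundleAdjusterRay>();\n    adjuster->setConfThresh(1.0);\n    if (!(*adjuster)(features, pairwise_matches, cameras))\n        return false;\n\n    // Straighten the panorama horizon.\n    std::vector<Mat> rmats;\n    for (size_t i = 0; i < cameras.size(); ++i)\n        rmats.push_back(cameras[i].R.clone());\n    detail::waveCorrect(rmats, detail::WAVE_CORRECT_HORIZ);\n    for (size_t i = 0; i < cameras.size(); ++i)\n        cameras[i].R = rmats[i];\n    return true;\n}\n"
  },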
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/seam_finders.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_SEAM_FINDERS_HPP__\n#define __OPENCV_STITCHING_SEAM_FINDERS_HPP__\n\n#include <set>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/opencv_modules.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching_seam\n//! 
@{\n\n/** @brief Base class for a seam estimator.\n */\nclass CV_EXPORTS SeamFinder\n{\npublic:\n    virtual ~SeamFinder() {}\n    /** @brief Estimates seams.\n\n    @param src Source images\n    @param corners Source image top-left corners\n    @param masks Source image masks to update\n     */\n    virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,\n                      std::vector<UMat> &masks) = 0;\n};\n\n/** @brief Stub seam estimator which does nothing.\n */\nclass CV_EXPORTS NoSeamFinder : public SeamFinder\n{\npublic:\n    void find(const std::vector<UMat>&, const std::vector<Point>&, std::vector<UMat>&) {}\n};\n\n/** @brief Base class for all pairwise seam estimators.\n */\nclass CV_EXPORTS PairwiseSeamFinder : public SeamFinder\n{\npublic:\n    virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,\n                      std::vector<UMat> &masks);\n\nprotected:\n    void run();\n    /** @brief Resolves masks intersection of two specified images in the given ROI.\n\n    @param first First image index\n    @param second Second image index\n    @param roi Region of interest\n     */\n    virtual void findInPair(size_t first, size_t second, Rect roi) = 0;\n\n    std::vector<UMat> images_;\n    std::vector<Size> sizes_;\n    std::vector<Point> corners_;\n    std::vector<UMat> masks_;\n};\n\n/** @brief Voronoi diagram-based seam estimator.\n */\nclass CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder\n{\npublic:\n    virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,\n                      std::vector<UMat> &masks);\n    virtual void find(const std::vector<Size> &size, const std::vector<Point> &corners,\n                      std::vector<UMat> &masks);\nprivate:\n    void findInPair(size_t first, size_t second, Rect roi);\n};\n\n\nclass CV_EXPORTS DpSeamFinder : public SeamFinder\n{\npublic:\n    enum CostFunction { COLOR, COLOR_GRAD };\n\n    DpSeamFinder(CostFunction costFunc = COLOR);\n\n    CostFunction costFunction() const { return costFunc_; }\n    void setCostFunction(CostFunction val) { costFunc_ = val; }\n\n    virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,\n                      std::vector<UMat> &masks);\n\nprivate:\n    enum ComponentState\n    {\n        FIRST = 1, SECOND = 2, INTERS = 4,\n        INTERS_FIRST = INTERS | FIRST,\n        INTERS_SECOND = INTERS | SECOND\n    };\n\n    class ImagePairLess\n    {\n    public:\n        ImagePairLess(const std::vector<Mat> &images, const std::vector<Point> &corners)\n            : src_(&images[0]), corners_(&corners[0]) {}\n\n        bool operator() (const std::pair<size_t, size_t> &l, const std::pair<size_t, size_t> &r) const\n        {\n            Point c1 = corners_[l.first] + Point(src_[l.first].cols / 2, src_[l.first].rows / 2);\n            Point c2 = corners_[l.second] + Point(src_[l.second].cols / 2, src_[l.second].rows / 2);\n            int d1 = (c1 - c2).dot(c1 - c2);\n\n            c1 = corners_[r.first] + Point(src_[r.first].cols / 2, src_[r.first].rows / 2);\n            c2 = corners_[r.second] + Point(src_[r.second].cols / 2, src_[r.second].rows / 2);\n            int d2 = (c1 - c2).dot(c1 - c2);\n\n            return d1 < d2;\n        }\n\n    private:\n        const Mat *src_;\n        const Point *corners_;\n    };\n\n    class ClosePoints\n    {\n    public:\n        ClosePoints(int minDist) : minDist_(minDist) {}\n\n        bool operator() (const Point &p1, const Point &p2) 
const\n        {\n            int dist2 = (p1.x-p2.x) * (p1.x-p2.x) + (p1.y-p2.y) * (p1.y-p2.y);\n            return dist2 < minDist_ * minDist_;\n        }\n\n    private:\n        int minDist_;\n    };\n\n    void process(\n            const Mat &image1, const Mat &image2, Point tl1, Point tl2,  Mat &mask1, Mat &mask2);\n\n    void findComponents();\n\n    void findEdges();\n\n    void resolveConflicts(\n            const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);\n\n    void computeGradients(const Mat &image1, const Mat &image2);\n\n    bool hasOnlyOneNeighbor(int comp);\n\n    bool closeToContour(int y, int x, const Mat_<uchar> &contourMask);\n\n    bool getSeamTips(int comp1, int comp2, Point &p1, Point &p2);\n\n    void computeCosts(\n            const Mat &image1, const Mat &image2, Point tl1, Point tl2,\n            int comp, Mat_<float> &costV, Mat_<float> &costH);\n\n    bool estimateSeam(\n            const Mat &image1, const Mat &image2, Point tl1, Point tl2, int comp,\n            Point p1, Point p2, std::vector<Point> &seam, bool &isHorizontal);\n\n    void updateLabelsUsingSeam(\n            int comp1, int comp2, const std::vector<Point> &seam, bool isHorizontalSeam);\n\n    CostFunction costFunc_;\n\n    // processing images pair data\n    Point unionTl_, unionBr_;\n    Size unionSize_;\n    Mat_<uchar> mask1_, mask2_;\n    Mat_<uchar> contour1mask_, contour2mask_;\n    Mat_<float> gradx1_, grady1_;\n    Mat_<float> gradx2_, grady2_;\n\n    // components data\n    int ncomps_;\n    Mat_<int> labels_;\n    std::vector<ComponentState> states_;\n    std::vector<Point> tls_, brs_;\n    std::vector<std::vector<Point> > contours_;\n    std::set<std::pair<int, int> > edges_;\n};\n\n/** @brief Base class for all minimum graph-cut-based seam estimators.\n */\nclass CV_EXPORTS GraphCutSeamFinderBase\n{\npublic:\n    enum CostType { COST_COLOR, COST_COLOR_GRAD };\n};\n\n/** @brief Minimum graph cut-based seam estimator. 
See details in @cite V03 .\n */\nclass CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder\n{\npublic:\n    GraphCutSeamFinder(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,\n                       float bad_region_penalty = 1000.f);\n\n    ~GraphCutSeamFinder();\n\n    void find(const std::vector<UMat> &src, const std::vector<Point> &corners,\n              std::vector<UMat> &masks);\n\nprivate:\n    // To avoid GCGraph dependency\n    class Impl;\n    Ptr<PairwiseSeamFinder> impl_;\n};\n\n\n#ifdef HAVE_OPENCV_CUDALEGACY\nclass CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder\n{\npublic:\n    GraphCutSeamFinderGpu(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,\n                          float bad_region_penalty = 1000.f)\n                          : cost_type_(cost_type), terminal_cost_(terminal_cost),\n                            bad_region_penalty_(bad_region_penalty) {}\n\n    void find(const std::vector<cv::UMat> &src, const std::vector<cv::Point> &corners,\n              std::vector<cv::UMat> &masks);\n    void findInPair(size_t first, size_t second, Rect roi);\n\nprivate:\n    void setGraphWeightsColor(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &mask1, const cv::Mat &mask2,\n                              cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom);\n    void setGraphWeightsColorGrad(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &dx1, const cv::Mat &dx2,\n                                  const cv::Mat &dy1, const cv::Mat &dy2, const cv::Mat &mask1, const cv::Mat &mask2,\n                                  cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom);\n    std::vector<Mat> dx_, dy_;\n    int cost_type_;\n    float terminal_cost_;\n    float bad_region_penalty_;\n};\n#endif\n\n//! @}\n\n} // namespace detail\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_SEAM_FINDERS_HPP__\n"
  },
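  {
    "path": "docs/examples/seam_finding_sketch.cpp",
    "content": "// NOTE: illustrative sketch added by the editor; this file (and its path) is hypothetical\n// and not part of the vendored OpenCV sources. It runs a seam estimator declared in\n// seam_finders.hpp on already warped images. `images`, `corners` and `masks` are assumed\n// to come from a warping stage; the images are converted to 32-bit float, the format the\n// graph-cut costs are computed on in the stitching pipeline.\n#include <vector>\n#include \"opencv2/stitching/detail/seam_finders.hpp\"\n\nusing namespace cv;\n\nvoid findSeams(const std::vector<UMat> &images,    // warped images\n               const std::vector<Point> &corners,  // their top-left corners\n               std::vector<UMat> &masks)           // warped masks, updated in place\n{\n    std::vector<UMat> images_f(images.size());\n    for (size_t i = 0; i < images.size(); ++i)\n        images[i].convertTo(images_f[i], CV_32F);\n\n    // Graph-cut seam finder with a color+gradient cost; the default terminal\n    // cost and bad-region penalty from the header are kept.\n    Ptr<detail::SeamFinder> seam_finder =\n        makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR_GRAD);\n    seam_finder->find(images_f, corners, masks);\n}\n"
  },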
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/timelapsers.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n\n#ifndef __OPENCV_STITCHING_TIMELAPSERS_HPP__\n#define __OPENCV_STITCHING_TIMELAPSERS_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching\n//! @{\n\n//  Base Timelapser class, takes a sequence of images, applies appropriate shift, stores result in dst_.\n\nclass CV_EXPORTS Timelapser\n{\npublic:\n\n    enum {AS_IS, CROP};\n\n    virtual ~Timelapser() {}\n\n    static Ptr<Timelapser> createDefault(int type);\n\n    virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes);\n    virtual void process(InputArray img, InputArray mask, Point tl);\n    virtual const UMat& getDst() {return dst_;}\n\nprotected:\n\n    virtual bool test_point(Point pt);\n\n    UMat dst_;\n    Rect dst_roi_;\n};\n\n\nclass CV_EXPORTS TimelapserCrop : public Timelapser\n{\npublic:\n    virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes);\n};\n\n//! @}\n\n} // namespace detail\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_TIMELAPSERS_HPP__\n"
  },
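  {
    "path": "docs/examples/timelapser_sketch.cpp",
    "content": "// NOTE: illustrative sketch added by the editor; this file (and its path) is hypothetical\n// and not part of the vendored OpenCV sources. It exercises the Timelapser interface from\n// timelapsers.hpp: initialize() fixes the destination ROI from the corners and sizes of\n// the whole sequence, then each frame is shifted into place with process(). Following the\n// stitching pipeline's convention, the frames are assumed to be CV_16SC3.\n#include <vector>\n#include \"opencv2/stitching/detail/timelapsers.hpp\"\n\nusing namespace cv;\n\nvoid composeTimelapse(const std::vector<Mat> &frames,   // warped frames, CV_16SC3\n                      const std::vector<Mat> &masks,    // corresponding 8U masks\n                      const std::vector<Point> &corners,\n                      const std::vector<Size> &sizes,\n                      std::vector<Mat> &results)\n{\n    // CROP keeps only the area covered by every frame; AS_IS keeps the union.\n    Ptr<detail::Timelapser> timelapser =\n        detail::Timelapser::createDefault(detail::Timelapser::CROP);\n\n    for (size_t i = 0; i < frames.size(); ++i)\n    {\n        timelapser->initialize(corners, sizes);  // reset dst_ for this frame\n        timelapser->process(frames[i], masks[i], corners[i]);\n        Mat result;\n        timelapser->getDst().copyTo(result);\n        results.push_back(result);\n    }\n}\n"
  },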
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/util.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_UTIL_HPP__\n#define __OPENCV_STITCHING_UTIL_HPP__\n\n#include <list>\n#include \"opencv2/core.hpp\"\n\n#ifndef ENABLE_LOG\n#define ENABLE_LOG 0\n#endif\n\n// TODO remove LOG macros, add logging class\n#if ENABLE_LOG\n#ifdef ANDROID\n  #include <iostream>\n  #include <sstream>\n  #include <android/log.h>\n  #define LOG_STITCHING_MSG(msg) \\\n    do { \\\n        Stringstream _os; \\\n        _os << msg; \\\n       __android_log_print(ANDROID_LOG_DEBUG, \"STITCHING\", \"%s\", _os.str().c_str()); \\\n    } while(0);\n#else\n  #include <iostream>\n  #define LOG_STITCHING_MSG(msg) for(;;) { std::cout << msg; std::cout.flush(); break; }\n#endif\n#else\n  #define LOG_STITCHING_MSG(msg)\n#endif\n\n#define LOG_(_level, _msg)                     \\\n    for(;;)                                    \\\n    {                                          \\\n        using namespace std;                   \\\n        if ((_level) >= ::cv::detail::stitchingLogLevel()) \\\n        {                                      \\\n            LOG_STITCHING_MSG(_msg);           \\\n        }                                      \\\n    break;                                 \\\n    }\n\n\n#define LOG(msg) LOG_(1, msg)\n#define LOG_CHAT(msg) LOG_(0, msg)\n\n#define LOGLN(msg) LOG(msg << std::endl)\n#define LOGLN_CHAT(msg) 
LOG_CHAT(msg << std::endl)\n\n//#if DEBUG_LOG_CHAT\n//  #define LOG_CHAT(msg) LOG(msg)\n//  #define LOGLN_CHAT(msg) LOGLN(msg)\n//#else\n//  #define LOG_CHAT(msg) do{}while(0)\n//  #define LOGLN_CHAT(msg) do{}while(0)\n//#endif\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching\n//! @{\n\nclass CV_EXPORTS DisjointSets\n{\npublic:\n    DisjointSets(int elem_count = 0) { createOneElemSets(elem_count); }\n\n    void createOneElemSets(int elem_count);\n    int findSetByElem(int elem);\n    int mergeSets(int set1, int set2);\n\n    std::vector<int> parent;\n    std::vector<int> size;\n\nprivate:\n    std::vector<int> rank_;\n};\n\n\nstruct CV_EXPORTS GraphEdge\n{\n    GraphEdge(int from, int to, float weight);\n    bool operator <(const GraphEdge& other) const { return weight < other.weight; }\n    bool operator >(const GraphEdge& other) const { return weight > other.weight; }\n\n    int from, to;\n    float weight;\n};\n\ninline GraphEdge::GraphEdge(int _from, int _to, float _weight) : from(_from), to(_to), weight(_weight) {}\n\n\nclass CV_EXPORTS Graph\n{\npublic:\n    Graph(int num_vertices = 0) { create(num_vertices); }\n    void create(int num_vertices) { edges_.assign(num_vertices, std::list<GraphEdge>()); }\n    int numVertices() const { return static_cast<int>(edges_.size()); }\n    void addEdge(int from, int to, float weight);\n    template <typename B> B forEach(B body) const;\n    template <typename B> B walkBreadthFirst(int from, B body) const;\n\nprivate:\n    std::vector< std::list<GraphEdge> > edges_;\n};\n\n\n//////////////////////////////////////////////////////////////////////////////\n// Auxiliary functions\n\nCV_EXPORTS bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi);\nCV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images);\nCV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes);\nCV_EXPORTS Rect resultRoiIntersection(const std::vector<Point> &corners, const std::vector<Size> &sizes);\nCV_EXPORTS Point resultTl(const std::vector<Point> &corners);\n\n// Returns random 'count' element subset of the {0,1,...,size-1} set\nCV_EXPORTS void selectRandomSubset(int count, int size, std::vector<int> &subset);\n\nCV_EXPORTS int& stitchingLogLevel();\n\n//! @}\n\n} // namespace detail\n} // namespace cv\n\n#include \"util_inl.hpp\"\n\n#endif // __OPENCV_STITCHING_UTIL_HPP__\n"
  },
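  {
    "path": "docs/examples/disjoint_sets_sketch.cpp",
    "content": "// NOTE: illustrative sketch added by the editor; this file (and its path) is hypothetical\n// and not part of the vendored OpenCV sources. Minimal usage of the DisjointSets helper\n// from util.hpp, which the stitcher uses (e.g. inside leaveBiggestComponent) to group\n// images into connected components joined by confident pairwise matches.\n#include <cstdio>\n#include \"opencv2/stitching/detail/util.hpp\"\n\nusing namespace cv;\n\nint main()\n{\n    detail::DisjointSets ds(5);  // five singleton sets: {0} {1} {2} {3} {4}\n\n    // mergeSets() takes set ids (roots), so look the elements' sets up first.\n    ds.mergeSets(ds.findSetByElem(0), ds.findSetByElem(1));\n    ds.mergeSets(ds.findSetByElem(1), ds.findSetByElem(2));\n\n    // Elements 0, 1 and 2 now share one root; 3 and 4 are still singletons.\n    for (int e = 0; e < 5; ++e)\n    {\n        int s = ds.findSetByElem(e);\n        std::printf(\"element %d -> set %d (size %d)\\n\", e, s, ds.size[s]);\n    }\n    return 0;\n}\n"
  },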
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/util_inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_UTIL_INL_HPP__\n#define __OPENCV_STITCHING_UTIL_INL_HPP__\n\n#include <queue>\n#include \"opencv2/core.hpp\"\n#include \"util.hpp\" // Make your IDE see declarations\n\n//! 
@cond IGNORED\n\nnamespace cv {\nnamespace detail {\n\ntemplate <typename B>\nB Graph::forEach(B body) const\n{\n    for (int i = 0; i < numVertices(); ++i)\n    {\n        std::list<GraphEdge>::const_iterator edge = edges_[i].begin();\n        for (; edge != edges_[i].end(); ++edge)\n            body(*edge);\n    }\n    return body;\n}\n\n\ntemplate <typename B>\nB Graph::walkBreadthFirst(int from, B body) const\n{\n    std::vector<bool> was(numVertices(), false);\n    std::queue<int> vertices;\n\n    was[from] = true;\n    vertices.push(from);\n\n    while (!vertices.empty())\n    {\n        int vertex = vertices.front();\n        vertices.pop();\n\n        std::list<GraphEdge>::const_iterator edge = edges_[vertex].begin();\n        for (; edge != edges_[vertex].end(); ++edge)\n        {\n            if (!was[edge->to])\n            {\n                body(*edge);\n                was[edge->to] = true;\n                vertices.push(edge->to);\n            }\n        }\n    }\n\n    return body;\n}\n\n\n//////////////////////////////////////////////////////////////////////////////\n// Some auxiliary math functions\n\nstatic inline\nfloat normL2(const Point3f& a)\n{\n    return a.x * a.x + a.y * a.y + a.z * a.z;\n}\n\n\nstatic inline\nfloat normL2(const Point3f& a, const Point3f& b)\n{\n    return normL2(a - b);\n}\n\n\nstatic inline\ndouble normL2sq(const Mat &r)\n{\n    return r.dot(r);\n}\n\n\nstatic inline int sqr(int x) { return x * x; }\nstatic inline float sqr(float x) { return x * x; }\nstatic inline double sqr(double x) { return x * x; }\n\n} // namespace detail\n} // namespace cv\n\n//! @endcond\n\n#endif // __OPENCV_STITCHING_UTIL_INL_HPP__\n"
  },
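  {
    "path": "docs/examples/graph_walk_sketch.cpp",
    "content": "// NOTE: illustrative sketch added by the editor; this file (and its path) is hypothetical\n// and not part of the vendored OpenCV sources. It exercises the Graph walkers defined in\n// util_inl.hpp: the body passed to walkBreadthFirst() is any callable taking a\n// const GraphEdge&, invoked once per tree edge of the breadth-first traversal.\n#include <cstdio>\n#include \"opencv2/stitching/detail/util.hpp\"\n\nusing namespace cv;\n\nstruct PrintEdge\n{\n    void operator()(const detail::GraphEdge &e) const\n    {\n        std::printf(\"%d -> %d (weight %.1f)\\n\", e.from, e.to, e.weight);\n    }\n};\n\nint main()\n{\n    detail::Graph g(4);\n    g.addEdge(0, 1, 1.f);\n    g.addEdge(0, 2, 2.f);\n    g.addEdge(2, 3, 3.f);\n\n    // Visits 1 and 2 from vertex 0, then 3 from vertex 2.\n    g.walkBreadthFirst(0, PrintEdge());\n    return 0;\n}\n"
  },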
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/warpers.hpp",
    "content": " /*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_WARPERS_HPP__\n#define __OPENCV_STITCHING_WARPERS_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/core/cuda.hpp\"\n#include \"opencv2/imgproc.hpp\"\n#include \"opencv2/opencv_modules.hpp\"\n\nnamespace cv {\nnamespace detail {\n\n//! @addtogroup stitching_warp\n//! 
@{\n\n/** @brief Rotation-only model image warper interface.\n */\nclass CV_EXPORTS RotationWarper\n{\npublic:\n    virtual ~RotationWarper() {}\n\n    /** @brief Projects the image point.\n\n    @param pt Source point\n    @param K Camera intrinsic parameters\n    @param R Camera rotation matrix\n    @return Projected point\n     */\n    virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0;\n\n    /** @brief Builds the projection maps according to the given camera data.\n\n    @param src_size Source image size\n    @param K Camera intrinsic parameters\n    @param R Camera rotation matrix\n    @param xmap Projection map for the x axis\n    @param ymap Projection map for the y axis\n    @return Projected image minimum bounding box\n     */\n    virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) = 0;\n\n    /** @brief Projects the image.\n\n    @param src Source image\n    @param K Camera intrinsic parameters\n    @param R Camera rotation matrix\n    @param interp_mode Interpolation mode\n    @param border_mode Border extrapolation mode\n    @param dst Projected image\n    @return Project image top-left corner\n     */\n    virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n                       OutputArray dst) = 0;\n\n    /** @brief Projects the image backward.\n\n    @param src Projected image\n    @param K Camera intrinsic parameters\n    @param R Camera rotation matrix\n    @param interp_mode Interpolation mode\n    @param border_mode Border extrapolation mode\n    @param dst_size Backward-projected image size\n    @param dst Backward-projected image\n     */\n    virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n                              Size dst_size, OutputArray dst) = 0;\n\n    /**\n    @param src_size Source image bounding box\n    @param K Camera intrinsic parameters\n    @param R Camera rotation matrix\n    @return Projected image minimum bounding box\n     */\n    virtual Rect warpRoi(Size src_size, InputArray K, InputArray R) = 0;\n\n    virtual float getScale() const { return 1.f; }\n    virtual void setScale(float) {}\n};\n\n/** @brief Base class for warping logic implementation.\n */\nstruct CV_EXPORTS ProjectorBase\n{\n    void setCameraParams(InputArray K = Mat::eye(3, 3, CV_32F),\n                         InputArray R = Mat::eye(3, 3, CV_32F),\n                         InputArray T = Mat::zeros(3, 1, CV_32F));\n\n    float scale;\n    float k[9];\n    float rinv[9];\n    float r_kinv[9];\n    float k_rinv[9];\n    float t[3];\n};\n\n/** @brief Base class for rotation-based warper using a detail::ProjectorBase_ derived class.\n */\ntemplate <class P>\nclass CV_EXPORTS RotationWarperBase : public RotationWarper\n{\npublic:\n    Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R);\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);\n\n    Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n               OutputArray dst);\n\n    void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n                      Size dst_size, OutputArray dst);\n\n    Rect warpRoi(Size src_size, InputArray K, InputArray R);\n\n    float getScale() const { return projector_.scale; }\n    void setScale(float val) { projector_.scale = val; }\n\nprotected:\n\n    // Detects ROI of the 
destination image. It's correct for any projection.\n    virtual void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);\n\n    // Detects ROI of the destination image by walking over image border.\n    // Correctness for any projection isn't guaranteed.\n    void detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br);\n\n    P projector_;\n};\n\n\nstruct CV_EXPORTS PlaneProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n/** @brief Warper that maps an image onto the z = 1 plane.\n */\nclass CV_EXPORTS PlaneWarper : public RotationWarperBase<PlaneProjector>\n{\npublic:\n    /** @brief Construct an instance of the plane warper class.\n\n    @param scale Projected image scale multiplier\n     */\n    PlaneWarper(float scale = 1.f) { projector_.scale = scale; }\n\n    Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R);\n    Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T);\n\n    virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap);\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);\n\n    Point warp(InputArray src, InputArray K, InputArray R,\n               int interp_mode, int border_mode, OutputArray dst);\n    virtual Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,\n               OutputArray dst);\n\n    Rect warpRoi(Size src_size, InputArray K, InputArray R);\n    Rect warpRoi(Size src_size, InputArray K, InputArray R, InputArray T);\n\nprotected:\n    void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);\n};\n\n\nstruct CV_EXPORTS SphericalProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\n/** @brief Warper that maps an image onto the unit sphere located at the origin.\n\n Projects image onto unit sphere with origin at (0, 0, 0).\n Poles are located at (0, -1, 0) and (0, 1, 0) points.\n*/\nclass CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector>\n{\npublic:\n    /** @brief Construct an instance of the spherical warper class.\n\n    @param scale Projected image scale multiplier\n     */\n    SphericalWarper(float scale) { projector_.scale = scale; }\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);\n    Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst);\nprotected:\n    void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);\n};\n\n\nstruct CV_EXPORTS CylindricalProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\n/** @brief Warper that maps an image onto the x\\*x + z\\*z = 1 cylinder.\n */\nclass CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjector>\n{\npublic:\n    /** @brief Construct an instance of the cylindrical warper class.\n\n    @param scale Projected image scale multiplier\n     */\n    CylindricalWarper(float scale) { projector_.scale = scale; }\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);\n    Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray 
dst);\nprotected:\n    void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)\n    {\n        RotationWarperBase<CylindricalProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);\n    }\n};\n\n\nstruct CV_EXPORTS FisheyeProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS FisheyeWarper : public RotationWarperBase<FisheyeProjector>\n{\npublic:\n    FisheyeWarper(float scale) { projector_.scale = scale; }\n};\n\n\nstruct CV_EXPORTS StereographicProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS StereographicWarper : public RotationWarperBase<StereographicProjector>\n{\npublic:\n    StereographicWarper(float scale) { projector_.scale = scale; }\n};\n\n\nstruct CV_EXPORTS CompressedRectilinearProjector : ProjectorBase\n{\n    float a, b;\n\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS CompressedRectilinearWarper : public RotationWarperBase<CompressedRectilinearProjector>\n{\npublic:\n    CompressedRectilinearWarper(float scale, float A = 1, float B = 1)\n    {\n        projector_.a = A;\n        projector_.b = B;\n        projector_.scale = scale;\n    }\n};\n\n\nstruct CV_EXPORTS CompressedRectilinearPortraitProjector : ProjectorBase\n{\n    float a, b;\n\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS CompressedRectilinearPortraitWarper : public RotationWarperBase<CompressedRectilinearPortraitProjector>\n{\npublic:\n   CompressedRectilinearPortraitWarper(float scale, float A = 1, float B = 1)\n   {\n       projector_.a = A;\n       projector_.b = B;\n       projector_.scale = scale;\n   }\n};\n\n\nstruct CV_EXPORTS PaniniProjector : ProjectorBase\n{\n    float a, b;\n\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS PaniniWarper : public RotationWarperBase<PaniniProjector>\n{\npublic:\n   PaniniWarper(float scale, float A = 1, float B = 1)\n   {\n       projector_.a = A;\n       projector_.b = B;\n       projector_.scale = scale;\n   }\n};\n\n\nstruct CV_EXPORTS PaniniPortraitProjector : ProjectorBase\n{\n    float a, b;\n\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS PaniniPortraitWarper : public RotationWarperBase<PaniniPortraitProjector>\n{\npublic:\n   PaniniPortraitWarper(float scale, float A = 1, float B = 1)\n   {\n       projector_.a = A;\n       projector_.b = B;\n       projector_.scale = scale;\n   }\n\n};\n\n\nstruct CV_EXPORTS MercatorProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS MercatorWarper : public RotationWarperBase<MercatorProjector>\n{\npublic:\n    MercatorWarper(float scale) { projector_.scale = scale; }\n};\n\n\nstruct CV_EXPORTS TransverseMercatorProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS TransverseMercatorWarper : public 
RotationWarperBase<TransverseMercatorProjector>\n{\npublic:\n    TransverseMercatorWarper(float scale) { projector_.scale = scale; }\n};\n\n\nclass CV_EXPORTS PlaneWarperGpu : public PlaneWarper\n{\npublic:\n    PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)\n    {\n        Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);\n        d_xmap_.download(xmap);\n        d_ymap_.download(ymap);\n        return result;\n    }\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap)\n    {\n        Rect result = buildMaps(src_size, K, R, T, d_xmap_, d_ymap_);\n        d_xmap_.download(xmap);\n        d_ymap_.download(ymap);\n        return result;\n    }\n\n    Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n               OutputArray dst)\n    {\n        d_src_.upload(src);\n        Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);\n        d_dst_.download(dst);\n        return result;\n    }\n\n    Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,\n               OutputArray dst)\n    {\n        d_src_.upload(src);\n        Point result = warp(d_src_, K, R, T, interp_mode, border_mode, d_dst_);\n        d_dst_.download(dst);\n        return result;\n    }\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, cuda::GpuMat & xmap, cuda::GpuMat & ymap);\n\n    Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,\n               cuda::GpuMat & dst);\n\n    Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,\n               cuda::GpuMat & dst);\n\nprivate:\n    cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;\n};\n\n\nclass CV_EXPORTS SphericalWarperGpu : public SphericalWarper\n{\npublic:\n    SphericalWarperGpu(float scale) : SphericalWarper(scale) {}\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)\n    {\n        Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);\n        d_xmap_.download(xmap);\n        d_ymap_.download(ymap);\n        return result;\n    }\n\n    Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n               OutputArray dst)\n    {\n        d_src_.upload(src);\n        Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);\n        d_dst_.download(dst);\n        return result;\n    }\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);\n\n    Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,\n               cuda::GpuMat & dst);\n\nprivate:\n    cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;\n};\n\n\nclass CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper\n{\npublic:\n    CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {}\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)\n    {\n        Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);\n        d_xmap_.download(xmap);\n        d_ymap_.download(ymap);\n        return result;\n    }\n\n    Point warp(InputArray 
src, InputArray K, InputArray R, int interp_mode, int border_mode,\n               OutputArray dst)\n    {\n        d_src_.upload(src);\n        Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);\n        d_dst_.download(dst);\n        return result;\n    }\n\n    Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);\n\n    Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,\n               cuda::GpuMat & dst);\n\nprivate:\n    cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;\n};\n\n\nstruct SphericalPortraitProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\n// Projects image onto unit sphere with origin at (0, 0, 0).\n// Poles are located NOT at (0, -1, 0) and (0, 1, 0) points, BUT at (1, 0, 0) and (-1, 0, 0) points.\nclass CV_EXPORTS SphericalPortraitWarper : public RotationWarperBase<SphericalPortraitProjector>\n{\npublic:\n    SphericalPortraitWarper(float scale) { projector_.scale = scale; }\n\nprotected:\n    void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);\n};\n\nstruct CylindricalPortraitProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS CylindricalPortraitWarper : public RotationWarperBase<CylindricalPortraitProjector>\n{\npublic:\n    CylindricalPortraitWarper(float scale) { projector_.scale = scale; }\n\nprotected:\n    void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)\n    {\n        RotationWarperBase<CylindricalPortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);\n    }\n};\n\nstruct PlanePortraitProjector : ProjectorBase\n{\n    void mapForward(float x, float y, float &u, float &v);\n    void mapBackward(float u, float v, float &x, float &y);\n};\n\n\nclass CV_EXPORTS PlanePortraitWarper : public RotationWarperBase<PlanePortraitProjector>\n{\npublic:\n    PlanePortraitWarper(float scale) { projector_.scale = scale; }\n\nprotected:\n    void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)\n    {\n        RotationWarperBase<PlanePortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);\n    }\n};\n\n//! @} stitching_warp\n\n} // namespace detail\n} // namespace cv\n\n#include \"warpers_inl.hpp\"\n\n#endif // __OPENCV_STITCHING_WARPERS_HPP__\n"
  },
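  {
    "path": "docs/examples/spherical_warp_sketch.cpp",
    "content": "// NOTE: illustrative sketch added by the editor; this file, its path and the \"image.jpg\"\n// input are hypothetical and not part of the vendored OpenCV sources. It warps one image\n// with the SphericalWarper declared in warpers.hpp. K and R would normally come from\n// camera estimation; here an identity rotation and a synthetic intrinsic matrix with an\n// assumed focal length stand in. Both must be 32-bit float, as expected by\n// detail::ProjectorBase::setCameraParams.\n#include \"opencv2/imgcodecs.hpp\"\n#include \"opencv2/stitching/detail/warpers.hpp\"\n\nusing namespace cv;\n\nint main()\n{\n    Mat src = imread(\"image.jpg\");\n    if (src.empty()) return 1;\n\n    float f = 500.f;  // assumed focal length, in pixels\n    Mat K = (Mat_<float>(3, 3) << f, 0, 0.5f * src.cols,\n                                  0, f, 0.5f * src.rows,\n                                  0, 0, 1);\n    Mat R = Mat::eye(3, 3, CV_32F);\n\n    // The scale parameter sets the resolution of the spherical surface;\n    // using the focal length keeps the output roughly at input scale.\n    detail::SphericalWarper warper(f);\n\n    Mat dst;\n    Point tl = warper.warp(src, K, R, INTER_LINEAR, BORDER_CONSTANT, dst);\n    (void)tl;  // top-left corner of dst in panorama coordinates\n\n    imwrite(\"warped.jpg\", dst);\n    return 0;\n}\n"
  },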
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/detail/warpers_inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_WARPERS_INL_HPP__\n#define __OPENCV_STITCHING_WARPERS_INL_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"warpers.hpp\" // Make your IDE see declarations\n#include <limits>\n\n//! 
@cond IGNORED\n\nnamespace cv {\nnamespace detail {\n\ntemplate <class P>\nPoint2f RotationWarperBase<P>::warpPoint(const Point2f &pt, InputArray K, InputArray R)\n{\n    projector_.setCameraParams(K, R);\n    Point2f uv;\n    projector_.mapForward(pt.x, pt.y, uv.x, uv.y);\n    return uv;\n}\n\n\ntemplate <class P>\nRect RotationWarperBase<P>::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray _xmap, OutputArray _ymap)\n{\n    projector_.setCameraParams(K, R);\n\n    Point dst_tl, dst_br;\n    detectResultRoi(src_size, dst_tl, dst_br);\n\n    _xmap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);\n    _ymap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);\n\n    Mat xmap = _xmap.getMat(), ymap = _ymap.getMat();\n\n    float x, y;\n    for (int v = dst_tl.y; v <= dst_br.y; ++v)\n    {\n        for (int u = dst_tl.x; u <= dst_br.x; ++u)\n        {\n            projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);\n            xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;\n            ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;\n        }\n    }\n\n    return Rect(dst_tl, dst_br);\n}\n\n\ntemplate <class P>\nPoint RotationWarperBase<P>::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n                                  OutputArray dst)\n{\n    UMat xmap, ymap;\n    Rect dst_roi = buildMaps(src.size(), K, R, xmap, ymap);\n\n    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());\n    remap(src, dst, xmap, ymap, interp_mode, border_mode);\n\n    return dst_roi.tl();\n}\n\n\ntemplate <class P>\nvoid RotationWarperBase<P>::warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,\n                                         Size dst_size, OutputArray dst)\n{\n    projector_.setCameraParams(K, R);\n\n    Point src_tl, src_br;\n    detectResultRoi(dst_size, src_tl, src_br);\n\n    Size size = src.size();\n    CV_Assert(src_br.x - src_tl.x + 1 == size.width && src_br.y - src_tl.y + 1 == size.height);\n\n    Mat xmap(dst_size, CV_32F);\n    Mat ymap(dst_size, CV_32F);\n\n    float u, v;\n    for (int y = 0; y < dst_size.height; ++y)\n    {\n        for (int x = 0; x < dst_size.width; ++x)\n        {\n            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);\n            xmap.at<float>(y, x) = u - src_tl.x;\n            ymap.at<float>(y, x) = v - src_tl.y;\n        }\n    }\n\n    dst.create(dst_size, src.type());\n    remap(src, dst, xmap, ymap, interp_mode, border_mode);\n}\n\n\ntemplate <class P>\nRect RotationWarperBase<P>::warpRoi(Size src_size, InputArray K, InputArray R)\n{\n    projector_.setCameraParams(K, R);\n\n    Point dst_tl, dst_br;\n    detectResultRoi(src_size, dst_tl, dst_br);\n\n    return Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1));\n}\n\n\ntemplate <class P>\nvoid RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)\n{\n    float tl_uf = std::numeric_limits<float>::max();\n    float tl_vf = std::numeric_limits<float>::max();\n    float br_uf = -std::numeric_limits<float>::max();\n    float br_vf = -std::numeric_limits<float>::max();\n\n    float u, v;\n    for (int y = 0; y < src_size.height; ++y)\n    {\n        for (int x = 0; x < src_size.width; ++x)\n        {\n            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);\n            tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\n            br_uf = std::max(br_uf, u); br_vf = 
std::max(br_vf, v);\n        }\n    }\n\n    dst_tl.x = static_cast<int>(tl_uf);\n    dst_tl.y = static_cast<int>(tl_vf);\n    dst_br.x = static_cast<int>(br_uf);\n    dst_br.y = static_cast<int>(br_vf);\n}\n\n\ntemplate <class P>\nvoid RotationWarperBase<P>::detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br)\n{\n    float tl_uf = std::numeric_limits<float>::max();\n    float tl_vf = std::numeric_limits<float>::max();\n    float br_uf = -std::numeric_limits<float>::max();\n    float br_vf = -std::numeric_limits<float>::max();\n\n    float u, v;\n    for (float x = 0; x < src_size.width; ++x)\n    {\n        projector_.mapForward(static_cast<float>(x), 0, u, v);\n        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\n        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\n\n        projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size.height - 1), u, v);\n        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\n        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\n    }\n    for (int y = 0; y < src_size.height; ++y)\n    {\n        projector_.mapForward(0, static_cast<float>(y), u, v);\n        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\n        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\n\n        projector_.mapForward(static_cast<float>(src_size.width - 1), static_cast<float>(y), u, v);\n        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\n        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\n    }\n\n    dst_tl.x = static_cast<int>(tl_uf);\n    dst_tl.y = static_cast<int>(tl_vf);\n    dst_br.x = static_cast<int>(br_uf);\n    dst_br.y = static_cast<int>(br_vf);\n}\n\n\ninline\nvoid PlaneProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    x_ = t[0] + x_ / z_ * (1 - t[2]);\n    y_ = t[1] + y_ / z_ * (1 - t[2]);\n\n    u = scale * x_;\n    v = scale * y_;\n}\n\n\ninline\nvoid PlaneProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u = u / scale - t[0];\n    v = v / scale - t[1];\n\n    float z;\n    x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2] * (1 - t[2]);\n    y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5] * (1 - t[2]);\n    z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8] * (1 - t[2]);\n\n    x /= z;\n    y /= z;\n}\n\n\ninline\nvoid SphericalProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    u = scale * atan2f(x_, z_);\n    float w = y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_);\n    v = scale * (static_cast<float>(CV_PI) - acosf(w == w ? 
w : 0));\n}\n\n\ninline\nvoid SphericalProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float sinv = sinf(static_cast<float>(CV_PI) - v);\n    float x_ = sinv * sinf(u);\n    float y_ = cosf(static_cast<float>(CV_PI) - v);\n    float z_ = sinv * cosf(u);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\n\ninline\nvoid CylindricalProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    u = scale * atan2f(x_, z_);\n    v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);\n}\n\n\ninline\nvoid CylindricalProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float x_ = sinf(u);\n    float y_ = v;\n    float z_ = cosf(u);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid FisheyeProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    u = scale * v_ * cosf(u_);\n    v = scale * v_ * sinf(u_);\n}\n\ninline\nvoid FisheyeProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float u_ = atan2f(v, u);\n    float v_ = sqrtf(u*u + v*v);\n\n    float sinv = sinf((float)CV_PI - v_);\n    float x_ = sinv * sinf(u_);\n    float y_ = cosf((float)CV_PI - v_);\n    float z_ = sinv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid StereographicProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    float r = sinf(v_) / (1 - cosf(v_));\n\n    u = scale * r * cos(u_);\n    v = scale * r * sin(u_);\n}\n\ninline\nvoid StereographicProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float u_ = atan2f(v, u);\n    float r = sqrtf(u*u + v*v);\n    float v_ = 2 * atanf(1.f / r);\n\n    float sinv = sinf((float)CV_PI - v_);\n    float x_ = sinv * sinf(u_);\n    float y_ = cosf((float)CV_PI - v_);\n    float z_ = sinv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = 
-1;\n}\n\ninline\nvoid CompressedRectilinearProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    u = scale * a * tanf(u_ / a);\n    v = scale * b * tanf(v_) / cosf(u_);\n}\n\ninline\nvoid CompressedRectilinearProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float aatg = a * atanf(u / a);\n    float u_ = aatg;\n    float v_ = atanf(v * cosf(aatg) / b);\n\n    float cosv = cosf(v_);\n    float x_ = cosv * sinf(u_);\n    float y_ = sinf(v_);\n    float z_ = cosv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid CompressedRectilinearPortraitProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    u = - scale * a * tanf(u_ / a);\n    v = scale * b * tanf(v_) / cosf(u_);\n}\n\ninline\nvoid CompressedRectilinearPortraitProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= - scale;\n    v /= scale;\n\n    float aatg = a * atanf(u / a);\n    float u_ = aatg;\n    float v_ = atanf(v * cosf( aatg ) / b);\n\n    float cosv = cosf(v_);\n    float y_ = cosv * sinf(u_);\n    float x_ = sinf(v_);\n    float z_ = cosv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid PaniniProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    float tg = a * tanf(u_ / a);\n    u = scale * tg;\n\n    float sinu = sinf(u_);\n    if ( fabs(sinu) < 1E-7 )\n        v = scale * b * tanf(v_);\n    else\n        v = scale * b * tg * tanf(v_) / sinu;\n}\n\ninline\nvoid PaniniProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float lamda = a * atanf(u / a);\n    float u_ = lamda;\n\n    float v_;\n    if ( fabs(lamda) > 1E-7)\n        v_ = atanf(v * sinf(lamda) / (b * a * tanf(lamda / a)));\n    else\n        v_ = atanf(v / b);\n\n    float cosv = cosf(v_);\n    float x_ = cosv * sinf(u_);\n    float y_ = sinf(v_);\n    float z_ = cosv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid PaniniPortraitProjector::mapForward(float x, float y, float &u, float &v)\n{\n   
 float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    float tg = a * tanf(u_ / a);\n    u = - scale * tg;\n\n    float sinu = sinf( u_ );\n    if ( fabs(sinu) < 1E-7 )\n        v = scale * b * tanf(v_);\n    else\n        v = scale * b * tg * tanf(v_) / sinu;\n}\n\ninline\nvoid PaniniPortraitProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= - scale;\n    v /= scale;\n\n    float lamda = a * atanf(u / a);\n    float u_ = lamda;\n\n    float v_;\n    if ( fabs(lamda) > 1E-7)\n        v_ = atanf(v * sinf(lamda) / (b * a * tanf(lamda/a)));\n    else\n        v_ = atanf(v / b);\n\n    float cosv = cosf(v_);\n    float y_ = cosv * sinf(u_);\n    float x_ = sinf(v_);\n    float z_ = cosv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid MercatorProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    u = scale * u_;\n    v = scale * logf( tanf( (float)(CV_PI/4) + v_/2 ) );\n}\n\ninline\nvoid MercatorProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float v_ = atanf( sinhf(v) );\n    float u_ = u;\n\n    float cosv = cosf(v_);\n    float x_ = cosv * sinf(u_);\n    float y_ = sinf(v_);\n    float z_ = cosv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid TransverseMercatorProjector::mapForward(float x, float y, float &u, float &v)\n{\n    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float u_ = atan2f(x_, z_);\n    float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));\n\n    float B = cosf(v_) * sinf(u_);\n\n    u = scale / 2 * logf( (1+B) / (1-B) );\n    v = scale * atan2f(tanf(v_), cosf(u_));\n}\n\ninline\nvoid TransverseMercatorProjector::mapBackward(float u, float v, float &x, float &y)\n{\n    u /= scale;\n    v /= scale;\n\n    float v_ = asinf( sinf(v) / coshf(u) );\n    float u_ = atan2f( sinhf(u), cos(v) );\n\n    float cosv = cosf(v_);\n    float x_ = cosv * sinf(u_);\n    float y_ = sinf(v_);\n    float z_ = cosv * cosf(u_);\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid SphericalPortraitProjector::mapForward(float x, float y, float &u0, float &v0)\n{\n    float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_ = 
r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float x_ = y0_;\n    float y_ = x0_;\n    float u, v;\n\n    u = scale * atan2f(x_, z_);\n    v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));\n\n    u0 = -u;//v;\n    v0 = v;//u;\n}\n\n\ninline\nvoid SphericalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y)\n{\n    float u, v;\n    u = -u0;//v0;\n    v = v0;//u0;\n\n    u /= scale;\n    v /= scale;\n\n    float sinv = sinf(static_cast<float>(CV_PI) - v);\n    float x0_ = sinv * sinf(u);\n    float y0_ = cosf(static_cast<float>(CV_PI) - v);\n    float z_ = sinv * cosf(u);\n\n    float x_ = y0_;\n    float y_ = x0_;\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid CylindricalPortraitProjector::mapForward(float x, float y, float &u0, float &v0)\n{\n    float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_  = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float x_ = y0_;\n    float y_ = x0_;\n    float u, v;\n\n    u = scale * atan2f(x_, z_);\n    v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);\n\n    u0 = -u;//v;\n    v0 = v;//u;\n}\n\n\ninline\nvoid CylindricalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y)\n{\n    float u, v;\n    u = -u0;//v0;\n    v = v0;//u0;\n\n    u /= scale;\n    v /= scale;\n\n    float x0_ = sinf(u);\n    float y0_ = v;\n    float z_  = cosf(u);\n\n    float x_ = y0_;\n    float y_ = x0_;\n\n    float z;\n    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\n    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\n    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\n\n    if (z > 0) { x /= z; y /= z; }\n    else x = y = -1;\n}\n\ninline\nvoid PlanePortraitProjector::mapForward(float x, float y, float &u0, float &v0)\n{\n    float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\n    float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\n    float z_  = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\n\n    float x_ = y0_;\n    float y_ = x0_;\n\n    x_ = t[0] + x_ / z_ * (1 - t[2]);\n    y_ = t[1] + y_ / z_ * (1 - t[2]);\n\n    float u,v;\n    u = scale * x_;\n    v = scale * y_;\n\n    u0 = -u;\n    v0 = v;\n}\n\n\ninline\nvoid PlanePortraitProjector::mapBackward(float u0, float v0, float &x, float &y)\n{\n    float u, v;\n    u = -u0;\n    v = v0;\n\n    u = u / scale - t[0];\n    v = v / scale - t[1];\n\n    float z;\n    x = k_rinv[0] * v + k_rinv[1] * u + k_rinv[2] * (1 - t[2]);\n    y = k_rinv[3] * v + k_rinv[4] * u + k_rinv[5] * (1 - t[2]);\n    z = k_rinv[6] * v + k_rinv[7] * u + k_rinv[8] * (1 - t[2]);\n\n    x /= z;\n    y /= z;\n}\n\n\n} // namespace detail\n} // namespace cv\n\n//! @endcond\n\n#endif // __OPENCV_STITCHING_WARPERS_INL_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching/warpers.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_WARPER_CREATORS_HPP__\n#define __OPENCV_STITCHING_WARPER_CREATORS_HPP__\n\n#include \"opencv2/stitching/detail/warpers.hpp\"\n\nnamespace cv {\n\n//! @addtogroup stitching_warp\n//! 
@{\n\n/** @brief Image warper factories base class.\n */\nclass WarperCreator\n{\npublic:\n    virtual ~WarperCreator() {}\n    virtual Ptr<detail::RotationWarper> create(float scale) const = 0;\n};\n\n/** @brief Plane warper factory class.\n  @sa detail::PlaneWarper\n */\nclass PlaneWarper : public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PlaneWarper>(scale); }\n};\n\n/** @brief Cylindrical warper factory class.\n@sa detail::CylindricalWarper\n*/\nclass CylindricalWarper: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CylindricalWarper>(scale); }\n};\n\n/** @brief Spherical warper factory class */\nclass SphericalWarper: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::SphericalWarper>(scale); }\n};\n\nclass FisheyeWarper : public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::FisheyeWarper>(scale); }\n};\n\nclass StereographicWarper: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::StereographicWarper>(scale); }\n};\n\nclass CompressedRectilinearWarper: public WarperCreator\n{\n    float a, b;\npublic:\n    CompressedRectilinearWarper(float A = 1, float B = 1)\n    {\n        a = A; b = B;\n    }\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CompressedRectilinearWarper>(scale, a, b); }\n};\n\nclass CompressedRectilinearPortraitWarper: public WarperCreator\n{\n    float a, b;\npublic:\n    CompressedRectilinearPortraitWarper(float A = 1, float B = 1)\n    {\n        a = A; b = B;\n    }\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CompressedRectilinearPortraitWarper>(scale, a, b); }\n};\n\nclass PaniniWarper: public WarperCreator\n{\n    float a, b;\npublic:\n    PaniniWarper(float A = 1, float B = 1)\n    {\n        a = A; b = B;\n    }\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PaniniWarper>(scale, a, b); }\n};\n\nclass PaniniPortraitWarper: public WarperCreator\n{\n    float a, b;\npublic:\n    PaniniPortraitWarper(float A = 1, float B = 1)\n    {\n        a = A; b = B;\n    }\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PaniniPortraitWarper>(scale, a, b); }\n};\n\nclass MercatorWarper: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::MercatorWarper>(scale); }\n};\n\nclass TransverseMercatorWarper: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::TransverseMercatorWarper>(scale); }\n};\n\n\n\n#ifdef HAVE_OPENCV_CUDAWARPING\nclass PlaneWarperGpu: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PlaneWarperGpu>(scale); }\n};\n\n\nclass CylindricalWarperGpu: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CylindricalWarperGpu>(scale); }\n};\n\n\nclass SphericalWarperGpu: public WarperCreator\n{\npublic:\n    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::SphericalWarperGpu>(scale); }\n};\n#endif\n\n//! @} stitching_warp\n\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_WARPER_CREATORS_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/stitching.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STITCHING_STITCHER_HPP__\n#define __OPENCV_STITCHING_STITCHER_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/stitching/warpers.hpp\"\n#include \"opencv2/stitching/detail/matchers.hpp\"\n#include \"opencv2/stitching/detail/motion_estimators.hpp\"\n#include \"opencv2/stitching/detail/exposure_compensate.hpp\"\n#include \"opencv2/stitching/detail/seam_finders.hpp\"\n#include \"opencv2/stitching/detail/blenders.hpp\"\n#include \"opencv2/stitching/detail/camera.hpp\"\n\n/**\n@defgroup stitching Images stitching\n\nThis figure illustrates the stitching module pipeline implemented in the Stitcher class. Using that\nclass it's possible to configure/remove some steps, i.e. adjust the stitching pipeline according to\nthe particular needs. 
All building blocks from the pipeline are available in the detail namespace;\none can combine and use them separately.\n\nThe implemented stitching pipeline is very similar to the one proposed in @cite BL07 .\n\n![image](StitchingPipeline.jpg)\n\n@{\n    @defgroup stitching_match Features Finding and Images Matching\n    @defgroup stitching_rotation Rotation Estimation\n    @defgroup stitching_autocalib Autocalibration\n    @defgroup stitching_warp Images Warping\n    @defgroup stitching_seam Seam Estimation\n    @defgroup stitching_exposure Exposure Compensation\n    @defgroup stitching_blend Image Blenders\n@}\n  */\n\nnamespace cv {\n\n//! @addtogroup stitching\n//! @{\n\n/** @brief High level image stitcher.\n\nIt's possible to use this class without being aware of the entire stitching pipeline. However, to\nachieve higher stitching stability and quality of the final images, at least basic familiarity\nwith the theory is recommended.\n\n@note\n    -   A basic example on image stitching can be found at\n        opencv_source_code/samples/cpp/stitching.cpp\n    -   A detailed example on image stitching can be found at\n        opencv_source_code/samples/cpp/stitching_detailed.cpp\n */\nclass CV_EXPORTS_W Stitcher\n{\npublic:\n    enum { ORIG_RESOL = -1 };\n    enum Status\n    {\n        OK = 0,\n        ERR_NEED_MORE_IMGS = 1,\n        ERR_HOMOGRAPHY_EST_FAIL = 2,\n        ERR_CAMERA_PARAMS_ADJUST_FAIL = 3\n    };\n\n   // Stitcher() {}\n    /** @brief Creates a stitcher with the default parameters.\n\n    @param try_use_gpu Flag indicating whether GPU should be used whenever it's possible.\n    @return Stitcher class instance.\n     */\n    static Stitcher createDefault(bool try_use_gpu = false);\n\n    CV_WRAP double registrationResol() const { return registr_resol_; }\n    CV_WRAP void setRegistrationResol(double resol_mpx) { registr_resol_ = resol_mpx; }\n\n    CV_WRAP double seamEstimationResol() const { return seam_est_resol_; }\n    CV_WRAP void setSeamEstimationResol(double resol_mpx) { seam_est_resol_ = resol_mpx; }\n\n    CV_WRAP double compositingResol() const { return compose_resol_; }\n    CV_WRAP void setCompositingResol(double resol_mpx) { compose_resol_ = resol_mpx; }\n\n    CV_WRAP double panoConfidenceThresh() const { return conf_thresh_; }\n    CV_WRAP void setPanoConfidenceThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }\n\n    CV_WRAP bool waveCorrection() const { return do_wave_correct_; }\n    CV_WRAP void setWaveCorrection(bool flag) { do_wave_correct_ = flag; }\n\n    detail::WaveCorrectKind waveCorrectKind() const { return wave_correct_kind_; }\n    void setWaveCorrectKind(detail::WaveCorrectKind kind) { wave_correct_kind_ = kind; }\n\n    Ptr<detail::FeaturesFinder> featuresFinder() { return features_finder_; }\n    const Ptr<detail::FeaturesFinder> featuresFinder() const { return features_finder_; }\n    void setFeaturesFinder(Ptr<detail::FeaturesFinder> features_finder)\n        { features_finder_ = features_finder; }\n\n    Ptr<detail::FeaturesMatcher> featuresMatcher() { return features_matcher_; }\n    const Ptr<detail::FeaturesMatcher> featuresMatcher() const { return features_matcher_; }\n    void setFeaturesMatcher(Ptr<detail::FeaturesMatcher> features_matcher)\n        { features_matcher_ = features_matcher; }\n\n    const cv::UMat& matchingMask() const { return matching_mask_; }\n    void setMatchingMask(const cv::UMat &mask)\n    {\n        CV_Assert(mask.type() == CV_8U && mask.cols == mask.rows);\n        matching_mask_ = 
mask.clone();\n    }\n\n    Ptr<detail::BundleAdjusterBase> bundleAdjuster() { return bundle_adjuster_; }\n    const Ptr<detail::BundleAdjusterBase> bundleAdjuster() const { return bundle_adjuster_; }\n    void setBundleAdjuster(Ptr<detail::BundleAdjusterBase> bundle_adjuster)\n        { bundle_adjuster_ = bundle_adjuster; }\n\n    Ptr<WarperCreator> warper() { return warper_; }\n    const Ptr<WarperCreator> warper() const { return warper_; }\n    void setWarper(Ptr<WarperCreator> creator) { warper_ = creator; }\n\n    Ptr<detail::ExposureCompensator> exposureCompensator() { return exposure_comp_; }\n    const Ptr<detail::ExposureCompensator> exposureCompensator() const { return exposure_comp_; }\n    void setExposureCompensator(Ptr<detail::ExposureCompensator> exposure_comp)\n        { exposure_comp_ = exposure_comp; }\n\n    Ptr<detail::SeamFinder> seamFinder() { return seam_finder_; }\n    const Ptr<detail::SeamFinder> seamFinder() const { return seam_finder_; }\n    void setSeamFinder(Ptr<detail::SeamFinder> seam_finder) { seam_finder_ = seam_finder; }\n\n    Ptr<detail::Blender> blender() { return blender_; }\n    const Ptr<detail::Blender> blender() const { return blender_; }\n    void setBlender(Ptr<detail::Blender> b) { blender_ = b; }\n\n    /** @overload */\n    CV_WRAP Status estimateTransform(InputArrayOfArrays images);\n    /** @brief These functions try to match the given images and to estimate rotations of each camera.\n\n    @note Use the functions only if you're aware of the stitching pipeline, otherwise use\n    Stitcher::stitch.\n\n    @param images Input images.\n    @param rois Region of interest rectangles.\n    @return Status code.\n     */\n    Status estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois);\n\n    /** @overload */\n    CV_WRAP Status composePanorama(OutputArray pano);\n    /** @brief These functions try to compose the given images (or images stored internally from the other function\n    calls) into the final pano under the assumption that the image transformations were estimated\n    before.\n\n    @note Use the functions only if you're aware of the stitching pipeline, otherwise use\n    Stitcher::stitch.\n\n    @param images Input images.\n    @param pano Final pano.\n    @return Status code.\n     */\n    Status composePanorama(InputArrayOfArrays images, OutputArray pano);\n\n    /** @overload */\n    CV_WRAP Status stitch(InputArrayOfArrays images, OutputArray pano);\n    /** @brief These functions try to stitch the given images.\n\n    @param images Input images.\n    @param rois Region of interest rectangles.\n    @param pano Final pano.\n    @return Status code.\n     */\n    Status stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano);\n\n    std::vector<int> component() const { return indices_; }\n    std::vector<detail::CameraParams> cameras() const { return cameras_; }\n    CV_WRAP double workScale() const { return work_scale_; }\n\nprivate:\n    //Stitcher() {}\n\n    Status matchImages();\n    Status estimateCameraParams();\n\n    double registr_resol_;\n    double seam_est_resol_;\n    double compose_resol_;\n    double conf_thresh_;\n    Ptr<detail::FeaturesFinder> features_finder_;\n    Ptr<detail::FeaturesMatcher> features_matcher_;\n    cv::UMat matching_mask_;\n    Ptr<detail::BundleAdjusterBase> bundle_adjuster_;\n    bool do_wave_correct_;\n    detail::WaveCorrectKind wave_correct_kind_;\n    Ptr<WarperCreator> warper_;\n    
Ptr<detail::ExposureCompensator> exposure_comp_;\n    Ptr<detail::SeamFinder> seam_finder_;\n    Ptr<detail::Blender> blender_;\n\n    std::vector<cv::UMat> imgs_;\n    std::vector<std::vector<cv::Rect> > rois_;\n    std::vector<cv::Size> full_img_sizes_;\n    std::vector<detail::ImageFeatures> features_;\n    std::vector<detail::MatchesInfo> pairwise_matches_;\n    std::vector<cv::UMat> seam_est_imgs_;\n    std::vector<int> indices_;\n    std::vector<detail::CameraParams> cameras_;\n    double work_scale_;\n    double seam_scale_;\n    double seam_work_aspect_;\n    double warped_image_scale_;\n};\n\nCV_EXPORTS_W Ptr<Stitcher> createStitcher(bool try_use_gpu = false);\n\n//! @} stitching\n\n} // namespace cv\n\n#endif // __OPENCV_STITCHING_STITCHER_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/structured_light/graycodepattern.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2015, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_GRAY_CODE_PATTERN_HPP__\n#define __OPENCV_GRAY_CODE_PATTERN_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv {\nnamespace structured_light {\n//! @addtogroup structured_light\n//! @{\n\n/** @brief Class implementing the Gray-code pattern, based on @cite UNDERWORLD.\n *\n *  The generation of the pattern images is performed with Gray encoding using the traditional white and black colors.\n *\n *  The information about the two image axes x, y is encoded separately into two different pattern sequences.\n *  A projector P with resolution (P_res_x, P_res_y) will result in Ncols = log 2 (P_res_x) encoded pattern images representing the columns, and\n *  in Nrows = log 2 (P_res_y) encoded pattern images representing the rows.\n *  For example a projector with resolution 1024x768 will result in Ncols = 10 and Nrows = 10.\n\n *  However, the generated pattern sequence consists of both regular color and color-inverted images: inverted pattern images are images\n *  with the same structure as the original but with inverted colors.\n *  This provides an effective method for easily determining the intensity value of each pixel when it is lit (highest value) and\n *  when it is not lit (lowest value). 
So for a projector with resolution 1024x768, the number of pattern images will be Ncols * 2 + Nrows * 2 = 40.\n *\n */\nclass CV_EXPORTS_W GrayCodePattern : public StructuredLightPattern\n{\n public:\n\n  /** @brief Parameters of StructuredLightPattern constructor.\n   *  @param width Projector's width. Default value is 1024.\n   *  @param height Projector's height. Default value is 768.\n   */\n  struct CV_EXPORTS_W_SIMPLE Params\n  {\n    CV_WRAP\n    Params();\n    CV_PROP_RW\n    int width;\n    CV_PROP_RW\n    int height;\n  };\n\n  /** @brief Constructor\n   @param parameters GrayCodePattern parameters GrayCodePattern::Params: the width and the height of the projector.\n   */\n  CV_WRAP\n  static Ptr<GrayCodePattern> create( const GrayCodePattern::Params &parameters = GrayCodePattern::Params() );\n\n  /** @brief Get the number of pattern images needed for the graycode pattern.\n   *\n   * @return The number of pattern images needed for the graycode pattern.\n   *\n   */\n   CV_WRAP\n   virtual size_t getNumberOfPatternImages() const = 0;\n\n  /** @brief Sets the value for white threshold, needed for decoding.\n   *\n   *  White threshold is a number between 0-255 that represents the minimum brightness difference required for valid pixels, between the graycode pattern and its inverse images; used in getProjPixel method.\n   *\n   *  @param value The desired white threshold value.\n   *\n   */\n  CV_WRAP\n  virtual void setWhiteThreshold( size_t value ) = 0;\n\n  /** @brief Sets the value for black threshold, needed for decoding (shadow masks computation).\n   *\n   *  Black threshold is a number between 0-255 that represents the minimum brightness difference required for valid pixels, between the fully illuminated (white) and the not illuminated images (black); used in computeShadowMasks method.\n   *\n   *  @param value The desired black threshold value.\n   *\n   */\n  CV_WRAP\n  virtual void setBlackThreshold( size_t value ) = 0;\n\n  /** @brief Generates the all-black and all-white images needed for shadowMasks computation.\n   *\n   *  To identify shadow regions, i.e. the regions of two images where the pixels are not lit by the projector's light and thus where there is no coded information,\n   *  the 3DUNDERWORLD algorithm computes a shadow mask for the two camera views, starting from a white and a black image captured by each camera.\n   *  This method generates these two additional images to project.\n   *\n   *  @param blackImage The generated all-black CV_8U image, at projector's resolution.\n   *  @param whiteImage The generated all-white CV_8U image, at projector's resolution.\n   */\n  CV_WRAP\n  virtual void getImagesForShadowMasks( InputOutputArray blackImage, InputOutputArray whiteImage ) const = 0;\n\n  /** @brief For an (x,y) pixel of a camera returns the corresponding projector pixel.\n   *\n   *  The function decodes each pixel in the pattern images acquired by a camera into their corresponding decimal numbers representing the projector's column and row,\n   *  providing a mapping between the camera's and the projector's pixel.\n   *\n   *  @param patternImages The pattern images acquired by the camera, stored in a grayscale vector < Mat >.\n   *  @param x x coordinate of the image pixel.\n   *  @param y y coordinate of the image pixel.\n   *  @param projPix Projector's pixel corresponding to the camera's pixel: projPix.x and projPix.y are the image coordinates of the projector's pixel corresponding to the pixel being decoded in a camera.\n   */\n  CV_WRAP\n  virtual bool 
getProjPixel( InputArrayOfArrays patternImages, int x, int y, Point &projPix ) const = 0;\n};\n\n//! @}\n}\n}\n#endif"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/structured_light/structured_light.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2015, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_STRUCTURED_LIGHT_HPP__\n#define __OPENCV_STRUCTURED_LIGHT_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv {\nnamespace structured_light {\n//! @addtogroup structured_light\n//! @{\n\n//! Type of the decoding algorithm\n// other algorithms can be implemented\nenum\n{\n  DECODE_3D_UNDERWORLD = 0  //!< Kyriakos Herakleous, Charalambos Poullis. 
“3DUNDERWORLD-SLS: An Open-Source Structured-Light Scanning System for Rapid Geometry Acquisition”, arXiv preprint arXiv:1406.6595 (2014).\n};\n\n/** @brief Abstract base class for generating and decoding structured light patterns.\n */\nclass CV_EXPORTS_W StructuredLightPattern : public virtual Algorithm\n{\n public:\n  /** @brief Generates the structured light pattern to project.\n\n   @param patternImages The generated pattern: a vector<Mat>, in which each image is a CV_8U Mat at projector's resolution.\n   */\n  CV_WRAP\n  virtual bool generate( OutputArrayOfArrays patternImages ) = 0;\n\n  /** @brief Decodes the structured light pattern, generating a disparity map\n\n   @param patternImages The acquired pattern images to decode (vector<vector<Mat>>), loaded as grayscale and previously rectified.\n   @param disparityMap The decoding result: a CV_64F Mat at image resolution, storing the computed disparity map.\n   @param blackImages The all-black images needed for shadowMasks computation.\n   @param whiteImages The all-white images needed for shadowMasks computation.\n   @param flags Flags setting decoding algorithms. Default: DECODE_3D_UNDERWORLD.\n   @note All the images must be at the same resolution.\n   */\n  CV_WRAP\n  virtual bool decode( InputArrayOfArrays patternImages, OutputArray disparityMap, InputArrayOfArrays blackImages =\n                          noArray(),\n                      InputArrayOfArrays whiteImages = noArray(), int flags = DECODE_3D_UNDERWORLD ) const = 0;\n};\n\n//! @}\n\n}\n}\n#endif"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/structured_light.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2015, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n/*#ifdef __OPENCV_BUILD\n #error this is a compatibility header which should not be used inside the OpenCV library\n #endif*/\n\n#include \"opencv2/structured_light/structured_light.hpp\"\n#include \"opencv2/structured_light/graycodepattern.hpp\"\n\n/** @defgroup structured_light Structured Light API\n\n Structured light is considered one of the most effective techniques to acquire 3D models.\n This technique is based on projecting a light pattern and capturing the illuminated scene\n from one or more points of view. Since the pattern is coded, correspondences between image\n points and points of the projected pattern can be quickly found and 3D information easily\n retrieved.\n\n One of the most commonly exploited coding strategies is based on trmatime-multiplexing. In this\n case, a set of patterns  are successively projected onto the measuring surface.\n The codeword for a given pixel is usually formed by  the sequence of illuminance values for that\n pixel across the projected patterns. 
Thus, the codification is called temporal because the bits\n of the codewords are multiplexed in time @cite pattern .\n\n In this module a time-multiplexing coding strategy based on Gray encoding is implemented following the\n (stereo) approach described in the 3DUNDERWORLD algorithm @cite UNDERWORLD .\n For more details, see @ref tutorial_structured_light.\n\n */"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/superres/optical_flow.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__\n#define __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n    namespace superres\n    {\n\n//! @addtogroup superres\n//! 
@{\n\n        class CV_EXPORTS DenseOpticalFlowExt : public cv::Algorithm\n        {\n        public:\n            virtual void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2 = noArray()) = 0;\n            virtual void collectGarbage() = 0;\n        };\n\n\n        class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt\n        {\n        public:\n            /** @see setPyrScale */\n            virtual double getPyrScale() const = 0;\n            /** @copybrief getPyrScale @see getPyrScale */\n            virtual void setPyrScale(double val) = 0;\n            /** @see setLevelsNumber */\n            virtual int getLevelsNumber() const = 0;\n            /** @copybrief getLevelsNumber @see getLevelsNumber */\n            virtual void setLevelsNumber(int val) = 0;\n            /** @see setWindowSize */\n            virtual int getWindowSize() const = 0;\n            /** @copybrief getWindowSize @see getWindowSize */\n            virtual void setWindowSize(int val) = 0;\n            /** @see setIterations */\n            virtual int getIterations() const = 0;\n            /** @copybrief getIterations @see getIterations */\n            virtual void setIterations(int val) = 0;\n            /** @see setPolyN */\n            virtual int getPolyN() const = 0;\n            /** @copybrief getPolyN @see getPolyN */\n            virtual void setPolyN(int val) = 0;\n            /** @see setPolySigma */\n            virtual double getPolySigma() const = 0;\n            /** @copybrief getPolySigma @see getPolySigma */\n            virtual void setPolySigma(double val) = 0;\n            /** @see setFlags */\n            virtual int getFlags() const = 0;\n            /** @copybrief getFlags @see getFlags */\n            virtual void setFlags(int val) = 0;\n        };\n        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();\n        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();\n\n\n//        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();\n\n\n        class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt\n        {\n        public:\n            /** @see setTau */\n            virtual double getTau() const = 0;\n            /** @copybrief getTau @see getTau */\n            virtual void setTau(double val) = 0;\n            /** @see setLambda */\n            virtual double getLambda() const = 0;\n            /** @copybrief getLambda @see getLambda */\n            virtual void setLambda(double val) = 0;\n            /** @see setTheta */\n            virtual double getTheta() const = 0;\n            /** @copybrief getTheta @see getTheta */\n            virtual void setTheta(double val) = 0;\n            /** @see setScalesNumber */\n            virtual int getScalesNumber() const = 0;\n            /** @copybrief getScalesNumber @see getScalesNumber */\n            virtual void setScalesNumber(int val) = 0;\n            /** @see setWarpingsNumber */\n            virtual int getWarpingsNumber() const = 0;\n            /** @copybrief getWarpingsNumber @see getWarpingsNumber */\n            virtual void setWarpingsNumber(int val) = 0;\n            /** @see setEpsilon */\n            virtual double getEpsilon() const = 0;\n            /** @copybrief getEpsilon @see getEpsilon */\n            virtual void setEpsilon(double val) = 0;\n            /** @see setIterations */\n            virtual int getIterations() const = 0;\n            /** @copybrief getIterations @see getIterations */\n           
 virtual void setIterations(int val) = 0;\n            /** @see setUseInitialFlow */\n            virtual bool getUseInitialFlow() const = 0;\n            /** @copybrief getUseInitialFlow @see getUseInitialFlow */\n            virtual void setUseInitialFlow(bool val) = 0;\n        };\n        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();\n        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();\n\n\n        class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt\n        {\n        public:\n            //! @brief Flow smoothness\n            /** @see setAlpha */\n            virtual double getAlpha() const = 0;\n            /** @copybrief getAlpha @see getAlpha */\n            virtual void setAlpha(double val) = 0;\n            //! @brief Gradient constancy importance\n            /** @see setGamma */\n            virtual double getGamma() const = 0;\n            /** @copybrief getGamma @see getGamma */\n            virtual void setGamma(double val) = 0;\n            //! @brief Pyramid scale factor\n            /** @see setScaleFactor */\n            virtual double getScaleFactor() const = 0;\n            /** @copybrief getScaleFactor @see getScaleFactor */\n            virtual void setScaleFactor(double val) = 0;\n            //! @brief Number of lagged non-linearity iterations (inner loop)\n            /** @see setInnerIterations */\n            virtual int getInnerIterations() const = 0;\n            /** @copybrief getInnerIterations @see getInnerIterations */\n            virtual void setInnerIterations(int val) = 0;\n            //! @brief Number of warping iterations (number of pyramid levels)\n            /** @see setOuterIterations */\n            virtual int getOuterIterations() const = 0;\n            /** @copybrief getOuterIterations @see getOuterIterations */\n            virtual void setOuterIterations(int val) = 0;\n            //! @brief Number of linear system solver iterations\n            /** @see setSolverIterations */\n            virtual int getSolverIterations() const = 0;\n            /** @copybrief getSolverIterations @see getSolverIterations */\n            virtual void setSolverIterations(int val) = 0;\n        };\n        CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();\n\n\n        class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt\n        {\n        public:\n            /** @see setWindowSize */\n            virtual int getWindowSize() const = 0;\n            /** @copybrief getWindowSize @see getWindowSize */\n            virtual void setWindowSize(int val) = 0;\n            /** @see setMaxLevel */\n            virtual int getMaxLevel() const = 0;\n            /** @copybrief getMaxLevel @see getMaxLevel */\n            virtual void setMaxLevel(int val) = 0;\n            /** @see setIterations */\n            virtual int getIterations() const = 0;\n            /** @copybrief getIterations @see getIterations */\n            virtual void setIterations(int val) = 0;\n        };\n        CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();\n\n//! @}\n\n    }\n}\n\n#endif // __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/superres.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_SUPERRES_HPP__\n#define __OPENCV_SUPERRES_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/superres/optical_flow.hpp\"\n\n/**\n  @defgroup superres Super Resolution\n\nThe Super Resolution module contains a set of functions and classes that can be used to solve the\nproblem of resolution enhancement. There are a few methods implemented, most of them are descibed in\nthe papers @cite Farsiu03 and @cite Mitzel09 .\n\n */\n\nnamespace cv\n{\n    namespace superres\n    {\n\n//! @addtogroup superres\n//! 
@{\n\n        class CV_EXPORTS FrameSource\n        {\n        public:\n            virtual ~FrameSource();\n\n            virtual void nextFrame(OutputArray frame) = 0;\n            virtual void reset() = 0;\n        };\n\n        CV_EXPORTS Ptr<FrameSource> createFrameSource_Empty();\n\n        CV_EXPORTS Ptr<FrameSource> createFrameSource_Video(const String& fileName);\n        CV_EXPORTS Ptr<FrameSource> createFrameSource_Video_CUDA(const String& fileName);\n\n        CV_EXPORTS Ptr<FrameSource> createFrameSource_Camera(int deviceId = 0);\n\n        /** @brief Base class for Super Resolution algorithms.\n\n        The class is only used to define the common interface for the whole family of Super Resolution\n        algorithms.\n         */\n        class CV_EXPORTS SuperResolution : public cv::Algorithm, public FrameSource\n        {\n        public:\n            /** @brief Set input frame source for Super Resolution algorithm.\n\n            @param frameSource Input frame source\n             */\n            void setInput(const Ptr<FrameSource>& frameSource);\n\n            /** @brief Process next frame from input and return output result.\n\n            @param frame Output result\n             */\n            void nextFrame(OutputArray frame);\n            void reset();\n\n            /** @brief Clear all inner buffers.\n            */\n            virtual void collectGarbage();\n\n            //! @brief Scale factor\n            /** @see setScale */\n            virtual int getScale() const = 0;\n            /** @copybrief getScale @see getScale */\n            virtual void setScale(int val) = 0;\n\n            //! @brief Iteration count\n            /** @see setIterations */\n            virtual int getIterations() const = 0;\n            /** @copybrief getIterations @see getIterations */\n            virtual void setIterations(int val) = 0;\n\n            //! @brief Asymptotic value of steepest descent method\n            /** @see setTau */\n            virtual double getTau() const = 0;\n            /** @copybrief getTau @see getTau */\n            virtual void setTau(double val) = 0;\n\n            //! @brief Weight parameter to balance data term and smoothness term\n            /** @see setLabmda */\n            virtual double getLabmda() const = 0;\n            /** @copybrief getLabmda @see getLabmda */\n            virtual void setLabmda(double val) = 0;\n\n            //! @brief Parameter of spatial distribution in Bilateral-TV\n            /** @see setAlpha */\n            virtual double getAlpha() const = 0;\n            /** @copybrief getAlpha @see getAlpha */\n            virtual void setAlpha(double val) = 0;\n\n            //! @brief Kernel size of Bilateral-TV filter\n            /** @see setKernelSize */\n            virtual int getKernelSize() const = 0;\n            /** @copybrief getKernelSize @see getKernelSize */\n            virtual void setKernelSize(int val) = 0;\n\n            //! @brief Gaussian blur kernel size\n            /** @see setBlurKernelSize */\n            virtual int getBlurKernelSize() const = 0;\n            /** @copybrief getBlurKernelSize @see getBlurKernelSize */\n            virtual void setBlurKernelSize(int val) = 0;\n\n            //! @brief Gaussian blur sigma\n            /** @see setBlurSigma */\n            virtual double getBlurSigma() const = 0;\n            /** @copybrief getBlurSigma @see getBlurSigma */\n            virtual void setBlurSigma(double val) = 0;\n\n            //! 
@brief Radius of the temporal search area\n            /** @see setTemporalAreaRadius */\n            virtual int getTemporalAreaRadius() const = 0;\n            /** @copybrief getTemporalAreaRadius @see getTemporalAreaRadius */\n            virtual void setTemporalAreaRadius(int val) = 0;\n\n            //! @brief Dense optical flow algorithm\n            /** @see setOpticalFlow */\n            virtual Ptr<cv::superres::DenseOpticalFlowExt> getOpticalFlow() const = 0;\n            /** @copybrief getOpticalFlow @see getOpticalFlow */\n            virtual void setOpticalFlow(const Ptr<cv::superres::DenseOpticalFlowExt> &val) = 0;\n\n        protected:\n            SuperResolution();\n\n            virtual void initImpl(Ptr<FrameSource>& frameSource) = 0;\n            virtual void processImpl(Ptr<FrameSource>& frameSource, OutputArray output) = 0;\n\n            bool isUmat_;\n\n        private:\n            Ptr<FrameSource> frameSource_;\n            bool firstCall_;\n        };\n\n        /** @brief Create Bilateral TV-L1 Super Resolution.\n\n        This class implements the Super Resolution algorithm described in the papers @cite Farsiu03 and\n        @cite Mitzel09 .\n\n        Here are important members of the class that control the algorithm, which you can set after\n        constructing the class instance:\n\n        -   **int scale** Scale factor.\n        -   **int iterations** Iteration count.\n        -   **double tau** Asymptotic value of steepest descent method.\n        -   **double lambda** Weight parameter to balance data term and smoothness term.\n        -   **double alpha** Parameter of spatial distribution in Bilateral-TV.\n        -   **int btvKernelSize** Kernel size of Bilateral-TV filter.\n        -   **int blurKernelSize** Gaussian blur kernel size.\n        -   **double blurSigma** Gaussian blur sigma.\n        -   **int temporalAreaRadius** Radius of the temporal search area.\n        -   **Ptr\<DenseOpticalFlowExt\> opticalFlow** Dense optical flow algorithm.\n         */\n        CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();\n        CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_CUDA();\n\n//! @} superres\n\n    }\n}\n\n#endif // __OPENCV_SUPERRES_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/surface_matching/icp.hpp",
    "content": "//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n\n/**\n * @file\n *\n * @brief  Implementation of ICP (Iterative Closest Point) Algorithm\n * @author Tolga Birdal <tbirdal AT gmail.com>\n */\n\n#ifndef __OPENCV_SURFACE_MATCHING_ICP_HPP__\n#define __OPENCV_SURFACE_MATCHING_ICP_HPP__\n\n#include <opencv2/core.hpp>\n\n#include \"pose_3d.hpp\"\n#include <vector>\n\nnamespace cv\n{\nnamespace ppf_match_3d\n{\n\n//! @addtogroup surface_matching\n//! @{\n\n/**\n* @brief This class implements a very efficient and robust variant of the iterative closest point (ICP) algorithm.\n* The task is to register a 3D model (or point cloud) against a set of noisy target data. The variants are put together\n* by myself after certain tests. The task is to be able to match partial, noisy point clouds in cluttered scenes, quickly.\n* You will find that my emphasis is on the performance, while retaining the accuracy.\n* This implementation is based on Tolga Birdal's MATLAB implementation in here:\n* http://www.mathworks.com/matlabcentral/fileexchange/47152-icp-registration-using-efficient-variants-and-multi-resolution-scheme\n* The main contributions come from:\n* 1. Picky ICP:\n* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2003/Zinsser03-ARI.pdf\n* 2. Efficient variants of the ICP Algorithm:\n* http://docs.happycoders.org/orgadoc/graphics/imaging/fasticp_paper.pdf\n* 3. Geometrically Stable Sampling for the ICP Algorithm: https://graphics.stanford.edu/papers/stabicp/stabicp.pdf\n* 4. 
\n*/\nclass CV_EXPORTS ICP\n{\npublic:\n\n  enum ICP_SAMPLING_TYPE\n  {\n    ICP_SAMPLING_TYPE_UNIFORM,\n    ICP_SAMPLING_TYPE_GELFAND\n  };\n\n  ICP()\n  {\n    m_tolerance = 0.005f;\n    m_rejectionScale = 2.5f;\n    m_maxIterations = 250;\n    m_numLevels = 6;\n    m_sampleType = ICP_SAMPLING_TYPE_UNIFORM;\n    m_numNeighborsCorr = 1;\n  }\n\n  virtual ~ICP() { }\n\n  /**\n     *  \\brief ICP constructor with default arguments.\n     *  @param [in] iterations Maximum number of iterations.\n     *  @param [in] tolerance Controls the accuracy of registration at each iteration of ICP.\n     *  @param [in] rejectionScale Robust outlier rejection is applied for robustness. This value\n            actually corresponds to the standard deviation coefficient. Points with\n            rejectionScale * sigma are ignored during registration.\n     *  @param [in] numLevels Number of pyramid levels to proceed. Deep pyramids increase speed but\n            decrease accuracy. Too coarse pyramids might have computational overhead on top of the\n            inaccurate registration. This parameter should be chosen to optimize a balance. Typical\n            values range from 4 to 10.\n     *  @param [in] sampleType Currently this parameter is ignored and only uniform sampling is\n            applied. Leave it as 0.\n     *  @param [in] numMaxCorr Currently this parameter is ignored and only PickyICP is applied. Leave it as 1.\n     */\n  ICP(const int iterations, const float tolerance=0.05, const float rejectionScale=2.5, const int numLevels=6, const ICP_SAMPLING_TYPE sampleType = ICP_SAMPLING_TYPE_UNIFORM, const int numMaxCorr=1)\n  {\n    m_tolerance = tolerance;\n    m_numNeighborsCorr = numMaxCorr;\n    m_rejectionScale = rejectionScale;\n    m_maxIterations = iterations;\n    m_numLevels = numLevels;\n    m_sampleType = sampleType;\n  }\n\n  /**\n     *  \\brief Perform registration\n     *\n     *  @param [in] srcPC The input point cloud for the model. Expected to have the normals (Nx6). Currently,\n     *  CV_32F is the only supported data type.\n     *  @param [in] dstPC The input point cloud for the scene. It is assumed that the model is registered on the scene. Scene remains static. Expected to have the normals (Nx6). Currently, CV_32F is the only supported data type.\n     *  @param [out] residual The output registration error.\n     *  @param [out] pose Transformation between srcPC and dstPC.\n     *  \\return On successful termination, the function returns 0.\n     *\n     *  \\details It is assumed that the model is registered on the scene. Scene remains static, while the model transforms. The output poses transform the models onto the scene. Because of the point to plane minimization, the scene is expected to have the normals available. Expected to have the normals (Nx6).\n     */\n  int registerModelToScene(const Mat& srcPC, const Mat& dstPC, double& residual, double pose[16]);\n\n  /**\n     *  \\brief Perform registration with multiple initial poses\n     *\n     *  @param [in] srcPC The input point cloud for the model. Expected to have the normals (Nx6). Currently,\n     *  CV_32F is the only supported data type.\n     *  @param [in] dstPC The input point cloud for the scene. 
Currently, CV_32F is the only supported data type.\n     *  @param [in,out] poses List of input poses to start with; it also serves as the output list of refined poses.\n     *  \\return On successful termination, the function returns 0.\n     *\n     *  \\details It is assumed that the model is registered on the scene. Scene remains static, while the model transforms. The output poses transform the models onto the scene. Because of the point to plane minimization, the scene is expected to have the normals available. Expected to have the normals (Nx6).\n     */\n  int registerModelToScene(const Mat& srcPC, const Mat& dstPC, std::vector<Pose3DPtr>& poses);\n\nprivate:\n  float m_tolerance;\n  int m_maxIterations;\n  float m_rejectionScale;\n  int m_numNeighborsCorr;\n  int m_numLevels;\n  int m_sampleType;\n\n};\n\n//! @}\n\n} // namespace ppf_match_3d\n\n} // namespace cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/surface_matching/pose_3d.hpp",
    "content": "//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n\n/** @file\n@author Tolga Birdal <tbirdal AT gmail.com>\n*/\n\n#ifndef __OPENCV_SURFACE_MATCHING_POSE3D_HPP__\n#define __OPENCV_SURFACE_MATCHING_POSE3D_HPP__\n\n#include \"opencv2/core/cvstd.hpp\" // cv::Ptr\n#include <vector>\n#include <string>\n\nnamespace cv\n{\nnamespace ppf_match_3d\n{\n\n//! @addtogroup surface_matching\n//! @{\n\nclass Pose3D;\ntypedef Ptr<Pose3D> Pose3DPtr;\n\nclass PoseCluster3D;\ntypedef Ptr<PoseCluster3D> PoseCluster3DPtr;\n\n/**\n* @brief Class, allowing the storage of a pose. The data structure stores both\n* the quaternions and the matrix forms. 
It supports IO functionality together with\n* various helper methods to work with poses\n*\n*/\nclass CV_EXPORTS Pose3D\n{\npublic:\n  Pose3D()\n  {\n    alpha=0;\n    modelIndex=0;\n    numVotes=0;\n    residual = 0;\n\n    for (int i=0; i<16; i++)\n      pose[i]=0;\n  }\n\n  Pose3D(double Alpha, unsigned int ModelIndex=0, unsigned int NumVotes=0)\n  {\n    alpha = Alpha;\n    modelIndex = ModelIndex;\n    numVotes = NumVotes;\n    residual=0;\n\n    for (int i=0; i<16; i++)\n      pose[i]=0;\n  }\n\n  /**\n   *  \\brief Updates the pose with the new one\n   *  \\param [in] NewPose New pose to overwrite\n   */\n  void updatePose(double NewPose[16]);\n\n  /**\n   *  \\brief Updates the pose with the new one\n   */\n  void updatePose(double NewR[9], double NewT[3]);\n\n  /**\n   *  \\brief Updates the pose with the new one, but this time using quaternions to represent rotation\n   */\n  void updatePoseQuat(double Q[4], double NewT[3]);\n\n  /**\n   *  \\brief Left multiplies the existing pose in order to update the transformation\n   *  \\param [in] IncrementalPose New pose to apply\n   */\n  void appendPose(double IncrementalPose[16]);\n  void printPose();\n\n  Pose3DPtr clone();\n\n  int writePose(FILE* f);\n  int readPose(FILE* f);\n  int writePose(const std::string& FileName);\n  int readPose(const std::string& FileName);\n\n  virtual ~Pose3D() {}\n\n  double alpha, residual;\n  unsigned int modelIndex;\n  unsigned int numVotes;\n  double pose[16], angle, t[3], q[4];\n};\n\n/**\n* @brief When multiple poses (see Pose3D) are grouped together (contribute to the same transformation) \n* pose clusters occur. This class is a general container for such groups of poses. It is possible to store,\n* load and perform IO on these poses.\n*/\nclass CV_EXPORTS PoseCluster3D\n{\npublic:\n  PoseCluster3D()\n  {\n    numVotes=0;\n    id=0;\n  }\n\n  PoseCluster3D(Pose3DPtr newPose)\n  {\n    poseList.clear();\n    poseList.push_back(newPose);\n    numVotes=newPose->numVotes;\n    id=0;\n  }\n\n  PoseCluster3D(Pose3DPtr newPose, int newId)\n  {\n    poseList.push_back(newPose);\n    this->numVotes = newPose->numVotes;\n    this->id = newId;\n  }\n\n  virtual ~PoseCluster3D()\n  {}\n\n  /**\n   *  \\brief Adds a new pose to the cluster. The pose should be \"close\" to the mean poses\n   *  in order to preserve the consistency\n   *  \\param [in] newPose Pose to add to the cluster\n   */\n  void addPose(Pose3DPtr newPose);\n\n  int writePoseCluster(FILE* f);\n  int readPoseCluster(FILE* f);\n  int writePoseCluster(const std::string& FileName);\n  int readPoseCluster(const std::string& FileName);\n\n  std::vector<Pose3DPtr> poseList;\n  int numVotes;\n  int id;\n};\n\n//! @}\n\n} // namespace ppf_match_3d\n} // namespace cv\n\n#endif\n\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/surface_matching/ppf_helpers.hpp",
    "content": "//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n\n/** @file\n@author Tolga Birdal <tbirdal AT gmail.com>\n*/\n\n#ifndef __OPENCV_SURFACE_MATCHING_HELPERS_HPP__\n#define __OPENCV_SURFACE_MATCHING_HELPERS_HPP__\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace ppf_match_3d\n{\n\n//! @addtogroup surface_matching\n//! 
@{\n\n/**\n *  @brief Load a PLY file\n *  @param [in] fileName The PLY model to read\n *  @param [in] withNormals Flag whether the input PLY contains normal information,\n *  and whether it should be loaded or not\n *  @return Returns the matrix on successful load\n */\nCV_EXPORTS Mat loadPLYSimple(const char* fileName, int withNormals);\n\n/**\n *  @brief Write a point cloud to PLY file\n *  @param [in] PC Input point cloud\n *  @param [in] fileName The PLY model file to write\n*/\nCV_EXPORTS void writePLY(Mat PC, const char* fileName);\n\n/**\n*  @brief Used for debugging purposes, writes a point cloud to a PLY file with the tip\n*  of the normal vectors as visible red points\n*  @param [in] PC Input point cloud\n*  @param [in] fileName The PLY model file to write\n*/\nCV_EXPORTS void writePLYVisibleNormals(Mat PC, const char* fileName);\n\nMat samplePCUniform(Mat PC, int sampleStep);\nMat samplePCUniformInd(Mat PC, int sampleStep, std::vector<int>& indices);\n\n/**\n *  Sample a point cloud using uniform steps\n *  @param [in] pc Input point cloud\n *  @param [in] xrange X components (min and max) of the bounding box of the model\n *  @param [in] yrange Y components (min and max) of the bounding box of the model\n *  @param [in] zrange Z components (min and max) of the bounding box of the model\n *  @param [in] sample_step_relative The point cloud is sampled such that all points\n *  have a certain minimum distance. This minimum distance is determined relatively using\n *  the parameter sample_step_relative. \n *  @param [in] weightByCenter The contribution of the quantized data points can be weighted\n *  by the distance to the origin. This parameter enables/disables the use of weighting.\n *  @return Sampled point cloud\n*/\nCV_EXPORTS Mat samplePCByQuantization(Mat pc, float xrange[2], float yrange[2], float zrange[2], float sample_step_relative, int weightByCenter=0);\n\nvoid computeBboxStd(Mat pc, float xRange[2], float yRange[2], float zRange[2]);\n\nvoid* indexPCFlann(Mat pc);\nvoid destroyFlann(void* flannIndex);\nvoid queryPCFlann(void* flannIndex, Mat& pc, Mat& indices, Mat& distances);\nvoid queryPCFlann(void* flannIndex, Mat& pc, Mat& indices, Mat& distances, const int numNeighbors);\n\n/**\n *  Mostly for visualization purposes. Normalizes the point cloud in a Hartley-Zisserman\n *  fashion. In other words, the point cloud is centered, and scaled such that the largest\n *  distance from the origin is sqrt(2). Finally a rescaling is applied.\n *  @param [in] pc Input point cloud (CV_32F family). Point clouds with 3 or 6 elements per\n *  row are expected.\n *  @param [in] scale The scale after normalization. Defaults to 1.\n *  @return Normalized point cloud\n*/\nCV_EXPORTS Mat normalize_pc(Mat pc, float scale);\n\nMat normalizePCCoeff(Mat pc, float scale, float* Cx, float* Cy, float* Cz, float* MinVal, float* MaxVal);\nMat transPCCoeff(Mat pc, float scale, float Cx, float Cy, float Cz, float MinVal, float MaxVal);\n\n/**\n *  Transforms the point cloud with a given homogeneous 4x4 pose matrix (in double precision)\n *  @param [in] pc Input point cloud (CV_32F family). Point clouds with 3 or 6 elements per\n *  row are expected. In the case where the normals are provided, they are also rotated to be\n *  compatible with the entire transformation.\n *  @param [in] Pose 4x4 pose matrix, but linearized in row-major form.\n *  @return Transformed point cloud\n
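 *\n *  A minimal usage sketch (pc as above; the pose below is the identity, in row-major order):\n *  @code\n *  double pose[16] = {1,0,0,0,  0,1,0,0,  0,0,1,0,  0,0,0,1};\n *  Mat transformed = transformPCPose(pc, pose);\n *  @endcode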
\n*/\nCV_EXPORTS Mat transformPCPose(Mat pc, double Pose[16]);\n\n/**\n *  Generate a random 4x4 pose matrix\n *  @param [out] Pose The random pose\n*/\nCV_EXPORTS void getRandomPose(double Pose[16]);\n\n/**\n *  Adds uniform noise at the given scale to the input point cloud\n *  @param [in] pc Input point cloud (CV_32F family). \n *  @param [in] scale Input scale of the noise. The larger the scale, the more noisy the output\n*/\nCV_EXPORTS Mat addNoisePC(Mat pc, double scale);\n\n/**\n *  @brief Compute the normals of an arbitrary point cloud\n *  computeNormalsPC3d uses a plane fitting approach to smoothly compute\n *  local normals. Normals are obtained through the eigenvector of the covariance\n *  matrix, corresponding to the smallest eigenvalue.\n *  If PCNormals is provided to be an Nx6 matrix, then no new allocation\n *  is made; instead, the existing memory is overwritten.\n *  @param [in] PC Input point cloud to compute the normals for.\n *  @param [out] PCNormals Output point cloud\n *  @param [in] NumNeighbors Number of neighbors to take into account in a local region\n *  @param [in] FlipViewpoint Should normals be flipped to a viewing direction?\n *  @param [in] viewpoint The viewpoint toward which the normals are flipped\n *  @return Returns 0 on success\n */\nCV_EXPORTS int computeNormalsPC3d(const Mat& PC, Mat& PCNormals, const int NumNeighbors, const bool FlipViewpoint, const double viewpoint[3]);\n\n//! @}\n\n} // namespace ppf_match_3d\n} // namespace cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/surface_matching/ppf_match_3d.hpp",
    "content": "//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n\n/**\n** ppf_match_3d : Interfaces for matching 3d surfaces in 3d scenes. This module implements the algorithm from Bertram Drost and Slobodan Ilic.\n** Use: Read a 3D model, load a 3D scene and match the model to the scene\n**\n**\n**  Creation - 2014\n**      Author: Tolga Birdal (tbirdal@gmail.com)\n**\n** Refer to the following research paper for more information:\n**  B. Drost, Markus Ulrich, N. Navab, S. Ilic\nModel Globally, Match Locally: Efficient and Robust 3D Object Recognition\nIEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR), San Francisco, California (USA), June 2010.\n***/\n\n/** @file\n@author Tolga Birdal  <tbirdal AT gmail.com>\n*/\n\n\n#ifndef __OPENCV_SURFACE_MATCHING_PPF_MATCH_3D_HPP__\n#define __OPENCV_SURFACE_MATCHING_PPF_MATCH_3D_HPP__\n\n#include <opencv2/core.hpp>\n\n#include <vector>\n#include \"pose_3d.hpp\"\n#include \"t_hash_int.hpp\"\n\nnamespace cv\n{\nnamespace ppf_match_3d\n{\n\n//! @addtogroup surface_matching\n//! 
@{\n\n/**\n  * @brief Struct, holding a node in the hashtable\n  */\ntypedef struct THash\n{\n  int id;\n  int i, ppfInd;\n} THash;\n\n/**\n  * @brief Class, allowing the load and matching of 3D models.\n  * Typical Use:\n  * @code\n  * // Train a model\n  * ppf_match_3d::PPF3DDetector detector(0.05, 0.05);\n  * detector.trainModel(pc);\n  * // Search the model in a given scene\n  * vector<Pose3DPtr> results;\n  * detector.match(pcTest, results, 1.0/5.0, 0.05);\n  * @endcode\n  */\nclass CV_EXPORTS PPF3DDetector\n{\npublic:\n\n  /**\n   * \\brief Empty constructor. Sets default arguments\n   */\n  PPF3DDetector();\n\n  /**\n    * Constructor with arguments\n    * @param [in] relativeSamplingStep Sampling distance relative to the object's diameter. Models are first sampled uniformly in order to improve efficiency. Decreasing this value leads to a denser model, and a more accurate pose estimation but the larger the model, the slower the training. Increasing the value leads to a less accurate pose computation but a smaller model and faster model generation and matching. Beware of the memory consumption when using small values.\n    * @param [in] relativeDistanceStep The discretization distance of the point pair distance relative to the model's diameter. This value has a direct impact on the hashtable. Using small values would lead to too fine discretization, and thus ambiguity in the bins of hashtable. Too large values would lead to no discrimination over the feature vectors and different point pair features would be assigned to the same bin. This argument defaults to the value of RelativeSamplingStep. For noisy scenes, the value can be increased to improve the robustness of the matching against noisy points.\n    * @param [in] numAngles Set the discretization of the point pair orientation as the number of subdivisions of the angle. This value is the equivalent of RelativeDistanceStep for the orientations. Increasing the value increases the precision of the matching but decreases the robustness against incorrect normal directions. Decreasing the value decreases the precision of the matching but increases the robustness against incorrect normal directions. For very noisy scenes where the normal directions cannot be computed accurately, the value can be set to 25 or 20.\n    */\n  PPF3DDetector(const double relativeSamplingStep, const double relativeDistanceStep=0.05, const double numAngles=30);\n\n  virtual ~PPF3DDetector();\n\n  /**\n    *  Set the parameters for the search\n    *  @param [in] positionThreshold Position threshold controlling the similarity of translations. Depends on the units of calibration/model.\n    *  @param [in] rotationThreshold Rotation threshold controlling the similarity of rotations. This parameter can be perceived as a threshold over the difference of angles\n    *  @param [in] useWeightedClustering The algorithm by default clusters the poses without weighting. A non-zero value would indicate that the pose clustering should take into account the number of votes as the weights and perform a weighted averaging instead of a simple one.\n    */\n  void setSearchParams(const double positionThreshold=-1, const double rotationThreshold=-1, const bool useWeightedClustering=false);\n\n  /**\n    *  \\brief Trains a new model.\n    *\n    *  @param [in] Model The input point cloud with normals (Nx6)\n    *\n    *  \\details Uses the parameters set in the constructor to downsample and learn a new model. 
When the model is learnt, the instance gets ready for calling \"match\".\n    */\n  void trainModel(const Mat& Model);\n\n  /**\n    *  \\brief Matches a trained model across a provided scene.\n    *\n    *  @param [in] scene Point cloud for the scene\n    *  @param [out] results List of output poses\n    *  @param [in] relativeSceneSampleStep The ratio of scene points to be used for the matching after sampling with relativeSceneDistance. For example, if this value is set to 1.0/5.0, every 5th point from the scene is used for pose estimation. This parameter allows an easy trade-off between speed and accuracy of the matching. Increasing the value leads to fewer points being used and in turn to a faster but less accurate pose computation. Decreasing the value has the inverse effect.\n    *  @param [in] relativeSceneDistance Set the distance threshold relative to the diameter of the model. This parameter is equivalent to relativeSamplingStep in the training stage. This parameter acts like a prior sampling with the relativeSceneSampleStep parameter.\n    */\n  void match(const Mat& scene, std::vector<Pose3DPtr> &results, const double relativeSceneSampleStep=1.0/5.0, const double relativeSceneDistance=0.03);\n\n  void read(const FileNode& fn);\n  void write(FileStorage& fs) const;\n\nprotected:\n\n  double angle_step, angle_step_radians, distance_step;\n  double sampling_step_relative, angle_step_relative, distance_step_relative;\n  Mat sampled_pc, ppf;\n  int num_ref_points, ppf_step;\n  hashtable_int* hash_table;\n  THash* hash_nodes;\n\n  double position_threshold, rotation_threshold;\n  bool use_weighted_avg;\n\n  int scene_sample_step;\n\n  void clearTrainingModels();\n\nprivate:\n  void computePPFFeatures(const double p1[4], const double n1[4],\n                          const double p2[4], const double n2[4],\n                          double f[4]);\n\n  bool matchPose(const Pose3D& sourcePose, const Pose3D& targetPose);\n\n  void clusterPoses(std::vector<Pose3DPtr> poseList, int numPoses, std::vector<Pose3DPtr> &finalPoses);\n\n  bool trained;\n};\n\n//! @}\n\n} // namespace ppf_match_3d\n\n} // namespace cv\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/surface_matching/t_hash_int.hpp",
    "content": "//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n\n/** @file\n@author Tolga Birdal <tbirdal AT gmail.com>\n*/\n\n#ifndef __OPENCV_SURFACE_MATCHING_T_HASH_INT_HPP__\n#define __OPENCV_SURFACE_MATCHING_T_HASH_INT_HPP__\n\n#include <stdio.h>\n#include <stdlib.h>\n\nnamespace cv\n{\nnamespace ppf_match_3d\n{\n\n//! @addtogroup surface_matching\n//! 
\n\ntypedef unsigned int KeyType;\n\ntypedef struct hashnode_i\n{\n  KeyType key;\n  void *data;\n  struct hashnode_i *next;\n} hashnode_i;\n\ntypedef struct HSHTBL_i\n{\n  size_t size;\n  struct hashnode_i **nodes;\n  size_t (*hashfunc)(unsigned int);\n} hashtable_int;\n\n\n/** @brief Round up to the next highest power of 2\n\nfrom http://www-graphics.stanford.edu/~seander/bithacks.html\n*/\ninline static unsigned int next_power_of_two(unsigned int value)\n{\n\n  --value;\n  value |= value >> 1;\n  value |= value >> 2;\n  value |= value >> 4;\n  value |= value >> 8;\n  value |= value >> 16;\n  ++value;\n\n  return value;\n}\n\nhashtable_int *hashtableCreate(size_t size, size_t (*hashfunc)(unsigned int));\nvoid hashtableDestroy(hashtable_int *hashtbl);\nint hashtableInsert(hashtable_int *hashtbl, KeyType key, void *data);\nint hashtableInsertHashed(hashtable_int *hashtbl, KeyType key, void *data);\nint hashtableRemove(hashtable_int *hashtbl, KeyType key);\nvoid *hashtableGet(hashtable_int *hashtbl, KeyType key);\nhashnode_i* hashtableGetBucketHashed(hashtable_int *hashtbl, KeyType key);\nint hashtableResize(hashtable_int *hashtbl, size_t size);\nhashtable_int *hashtable_int_clone(hashtable_int *hashtbl);\nhashtable_int *hashtableRead(FILE* f);\nint hashtableWrite(const hashtable_int * hashtbl, const size_t dataSize, FILE* f);\nvoid hashtablePrint(hashtable_int *hashtbl);\n\n//! @}\n\n} // namespace ppf_match_3d\n\n} // namespace cv\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/surface_matching.hpp",
    "content": "//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n#ifndef __OPENCV_SURFACE_MATCHING_HPP__\n#define __OPENCV_SURFACE_MATCHING_HPP__\n\n#include \"surface_matching/ppf_match_3d.hpp\"\n#include \"surface_matching/icp.hpp\"\n\n/** @defgroup surface_matching Surface Matching\n\nNote about the License and Patents\n-----------------------------------\n\nThe following patents have been issued for methods embodied in this\nsoftware: \"Recognition and pose determination of 3D objects in 3D scenes\nusing geometric point pair descriptors and the generalized Hough\nTransform\", Bertram Heinrich Drost, Markus Ulrich, EP Patent 2385483\n(Nov. 21, 2012), assignee: MVTec Software GmbH, 81675 Muenchen\n(Germany); \"Recognition and pose determination of 3D objects in 3D\nscenes\", Bertram Heinrich Drost, Markus Ulrich, US Patent 8830229 (Sept.\n9, 2014), assignee: MVTec Software GmbH, 81675 Muenchen (Germany).\nFurther patents are pending. For further details, contact MVTec Software\nGmbH (info@mvtec.com).\n\nNote that restrictions imposed by these patents (and possibly others)\nexist independently of and may be in conflict with the freedoms granted\nin this license, which refers to copyright of the program, not patents\nfor any methods that it implements.  Both copyright and patent law must\nbe obeyed to legally use and redistribute this program and it is not the\npurpose of this license to induce you to infringe any patents or other\nproperty right claims or to contest validity of any such claims.  
If you\nredistribute or use the program, then this license merely protects you\nfrom committing copyright infringement.  It does not protect you from\ncommitting patent infringement.  So, before you do anything with this\nprogram, make sure that you have permission to do so not merely in terms\nof copyright, but also in terms of patent law.\n\nPlease note that this license is not to be understood as a guarantee\neither.  If you use the program according to this license, but in\nconflict with patent law, it does not mean that the licensor will refund\nyou for any losses that you incur if you are sued for your patent\ninfringement.\n\n\nIntroduction to Surface Matching\n--------------------------------\n\nCameras and similar devices with the capability of sensation of 3D structure are becoming more\ncommon. Thus, using depth and intensity information for matching 3D objects (or parts) are of\ncrucial importance for computer vision. Applications range from industrial control to guiding\neveryday actions for visually impaired people. The task in recognition and pose estimation in range\nimages aims to identify and localize a queried 3D free-form object by matching it to the acquired\ndatabase.\n\nFrom an industrial perspective, enabling robots to automatically locate and pick up randomly placed\nand oriented objects from a bin is an important challenge in factory automation, replacing tedious\nand heavy manual labor. A system should be able to recognize and locate objects with a predefined\nshape and estimate the position with the precision necessary for a gripping robot to pick it up.\nThis is where vision guided robotics takes the stage. Similar tools are also capable of guiding\nrobots (and even people) through unstructured environments, leading to automated navigation. These\nproperties make 3D matching from point clouds a ubiquitous necessity. Within this context, I will\nnow describe the OpenCV implementation of a 3D object recognition and pose estimation algorithm\nusing 3D features.\n\nSurface Matching Algorithm Through 3D Features\n----------------------------------------------\n\nThe state of the algorithms in order to achieve the task 3D matching is heavily based on\n@cite drost2010, which is one of the first and main practical methods presented in this area. The\napproach is composed of extracting 3D feature points randomly from depth images or generic point\nclouds, indexing them and later in runtime querying them efficiently. Only the 3D structure is\nconsidered, and a trivial hash table is used for feature queries.\n\nWhile being fully aware that utilization of the nice CAD model structure in order to achieve a smart\npoint sampling, I will be leaving that aside now in order to respect the generalizability of the\nmethods (Typically for such algorithms training on a CAD model is not needed, and a point cloud\nwould be sufficient). Below is the outline of the entire algorithm:\n\n![Outline of the Algorithm](img/outline.jpg)\n\nAs explained, the algorithm relies on the extraction and indexing of point pair features, which are\ndefined as follows:\n\n\\f[\\bf{{F}}(\\bf{{m1}}, \\bf{{m2}}) = (||\\bf{{d}}||_2, <(\\bf{{n1}},\\bf{{d}}), <(\\bf{{n2}},\\bf{{d}}), <(\\bf{{n1}},\\bf{{n2}}))\\f]\n\nwhere \\f$\\bf{{m1}}\\f$ and \\f$\\bf{{m2}}\\f$ are feature two selected points on the model (or scene),\n\\f$\\bf{{d}}\\f$ is the difference vector, \\f$\\bf{{n1}}\\f$ and \\f$\\bf{{n2}}\\f$ are the normals at \\f$\\bf{{m1}}\\f$ and\n\\f$\\bf{m2}\\f$. 
\n\nDuring the training stage, this feature vector is quantized and indexed. In the test stage, the same\nfeatures are extracted from the scene and compared to the database. With a few tricks like\nseparation of the rotational components, the pose estimation part can also be made efficient (check\nthe reference for more details). A Hough-like voting and clustering is employed to estimate the\nobject pose. To cluster the poses, the raw pose hypotheses are sorted in decreasing order of the\nnumber of votes. From the highest vote, a new cluster is created. If the next pose hypothesis is\nclose to one of the existing clusters, the hypothesis is added to the cluster and the cluster center\nis updated as the average of the pose hypotheses within the cluster. If the next hypothesis is not\nclose to any of the clusters, it creates a new cluster. The proximity testing is done with fixed\nthresholds in translation and rotation. Distance computation and averaging for translation are\nperformed in the 3D Euclidean space, while those for rotation are performed using quaternion\nrepresentation. After clustering, the clusters are sorted in decreasing order of the total number of\nvotes which determines confidence of the estimated poses.\n\nThis pose is further refined using \\f$ICP\\f$ in order to obtain the final pose.\n\nThe PPF presented above depends largely on robust computation of angles between 3D vectors. Even though\nnot reported in the paper, the naive way of doing this (\\f$\\theta = \\cos^{-1}({\\bf{a}}\\cdot{\\bf{b}})\\f$)\nremains numerically unstable. A better way to do this is to use the inverse tangent, like:\n\n\\f[<(\\bf{n1},\\bf{n2})=\\tan^{-1}(||{\\bf{n1}  \\wedge \\bf{n2}}||_2, \\bf{n1} \\cdot \\bf{n2})\\f]\n\nRough Computation of Object Pose Given PPF\n------------------------------------------\n\nLet me summarize the following notation:\n\n-   \\f$p^i_m\\f$: \\f$i^{th}\\f$ point of the model (\\f$p^j_m\\f$ accordingly)\n-   \\f$n^i_m\\f$: Normal of the \\f$i^{th}\\f$ point of the model (\\f$n^j_m\\f$ accordingly)\n-   \\f$p^i_s\\f$: \\f$i^{th}\\f$ point of the scene (\\f$p^j_s\\f$ accordingly)\n-   \\f$n^i_s\\f$: Normal of the \\f$i^{th}\\f$ point of the scene (\\f$n^j_s\\f$ accordingly)\n-   \\f$T_{m\\rightarrow g}\\f$: The transformation required to translate \\f$p^i_m\\f$ to the origin and rotate\n    its normal \\f$n^i_m\\f$ onto the \\f$x\\f$-axis.\n-   \\f$R_{m\\rightarrow g}\\f$: Rotational component of \\f$T_{m\\rightarrow g}\\f$.\n-   \\f$t_{m\\rightarrow g}\\f$: Translational component of \\f$T_{m\\rightarrow g}\\f$.\n-   \\f$(p^i_m)^{'}\\f$: \\f$i^{th}\\f$ point of the model transformed by \\f$T_{m\\rightarrow g}\\f$. (\\f$(p^j_m)^{'}\\f$\n    accordingly).\n-   \\f${\\bf{R_{m\\rightarrow g}}}\\f$: Axis angle representation of rotation \\f$R_{m\\rightarrow g}\\f$.\n-   \\f$\\theta_{m\\rightarrow g}\\f$: The angular component of the axis angle representation\n    \\f${\\bf{R_{m\\rightarrow g}}}\\f$.\n\nThe transformation in a point pair feature is computed by first finding the transformation\n\\f$T_{m\\rightarrow g}\\f$ from the first point, and applying the same transformation to the second one.\nTransforming each point, together with the normal, to the ground plane leaves us with an angle to\nbe determined during a comparison with a new point pair.\n\nWe could now simply start writing\n\n\\f[(p^i_m)^{'} = T_{m\\rightarrow g} p^i_m\\f]\n\nwhere\n\n\\f[T_{m\\rightarrow g} = -t_{m\\rightarrow g}R_{m\\rightarrow g}\\f]\n\nNote that this is nothing but a stacked transformation. 
The translational component\n\\f$t_{m\\rightarrow g}\\f$ reads\n\n\\f[t_{m\\rightarrow g} = -R_{m\\rightarrow g}p^i_m\\f]\n\nand the rotational component being\n\n\\f[\\theta_{m\\rightarrow g} = \\cos^{-1}(n^i_m \\cdot {\\bf{x}})\\\\\n {\\bf{R_{m\\rightarrow g}}} = n^i_m \\wedge {\\bf{x}}\\f]\n\nin axis angle format. Note that bold refers to the vector form. After this transformation, the\nfeature vectors of the model are registered onto the ground plane X and the angle with respect to\n\\f$x=0\\f$ is called \\f$\\alpha_m\\f$. Similarly, for the scene, it is called \\f$\\alpha_s\\f$.\n\n### Hough-like Voting Scheme\n\nAs shown in the outline, PPF (point pair features) are extracted from the model, quantized, stored\nin the hashtable and indexed, during the training stage. During runtime, however, a similar\noperation is performed on the input scene with the exception that this time a similarity lookup over\nthe hashtable is performed, instead of an insertion. This lookup also allows us to compute a\ntransformation to the ground plane for the scene pairs. After this point, computing the rotational\ncomponent of the pose reduces to computation of the difference \\f$\\alpha=\\alpha_m-\\alpha_s\\f$. This\ncomponent carries the cue about the object pose. A Hough-like voting scheme is performed over the\nlocal model coordinate vector and \\f$\\alpha\\f$. The highest-voted poses achieved for every scene point let us\nrecover the object pose.\n\n### Source Code for PPF Matching\n\n~~~{cpp}\n// pc is the loaded point cloud of the model\n// (Nx6) and pcTest is a loaded point cloud of\n// the scene (Mx6)\nppf_match_3d::PPF3DDetector detector(0.03, 0.05);\ndetector.trainModel(pc);\nvector<Pose3DPtr> results;\ndetector.match(pcTest, results, 1.0/10.0, 0.05);\ncout << \"Poses: \" << endl;\n// print the poses\nfor (size_t i=0; i<results.size(); i++)\n{\n    Pose3DPtr pose = results[i];\n    cout << \"Pose Result \" << i << endl;\n    pose->printPose();\n}\n~~~\n\nPose Registration via ICP\n-------------------------\n\nThe matching process terminates with the attainment of the pose. However, due to the multiple\nmatching points, erroneous hypotheses, pose averaging, etc., such a pose is very open to noise and\nmany times far from perfect. Although the visual results obtained in that stage are\npleasing, the quantitative evaluation shows \\f$~10\\f$ degrees variation (error), which is an acceptable\nlevel of matching. Many times, the requirement might be set well beyond this margin and it is\ndesired to refine the computed pose.\n\nFurthermore, in typical RGBD scenes and point clouds, 3D structure can capture only less than half\nof the model due to the visibility in the scene. Therefore, a robust pose refinement algorithm,\nwhich can register occluded and partially visible shapes quickly and correctly, is not an unrealistic\nwish.\n\nAt this point, a trivial option would be to use the well-known iterative closest point algorithm.\nHowever, utilization of the basic ICP leads to slow convergence, bad registration, outlier\nsensitivity and failure to register partial shapes. Thus, it is definitely not suited to the\nproblem. For this reason, many variants have been proposed. Different variants contribute to\ndifferent stages of the pose estimation process.\n\nICP is composed of \\f$6\\f$ stages and the improvements I propose for each stage are summarized below.\n\n### Sampling\n\nTo improve convergence speed and computation time, it is common to use fewer points than the model\nactually has. 
However, sampling the correct points to register is an issue in itself. The naive way\nwould be to sample uniformly and hope to get a reasonable subset. Smarter ways try to identify\nthe critical points, which are found to highly contribute to the registration process. Gelfand et\nal. exploit the covariance matrix in order to constrain the eigenspace, so that a set of points\nwhich affect both translation and rotation are used. This is a clever way of subsampling, which I\nwill optionally be using in the implementation.\n\n### Correspondence Search\n\nAs the name implies, this step is actually the assignment of the points in the data and the model in\na closest point fashion. Correct assignments will lead to a correct pose, whereas wrong assignments\nstrongly degrade the result. In general, KD-trees are used in the search of nearest neighbors, to\nincrease the speed. However, this does not guarantee optimality and many times causes wrong points\nto be matched. Luckily the assignments are corrected over iterations.\n\nTo overcome some of the limitations, Picky ICP @cite pickyicp and BC-ICP (ICP using bi-unique\ncorrespondences) are two well-known methods. Picky ICP first finds the correspondences in the\nold-fashioned way and then among the resulting corresponding pairs, if more than one scene point\n\\f$p_i\\f$ is assigned to the same model point \\f$m_j\\f$, it selects \\f$p_i\\f$ that corresponds to the minimum\ndistance. BC-ICP on the other hand, allows multiple correspondences first and then resolves the\nassignments by establishing bi-unique correspondences. It also defines a novel no-correspondence\noutlier, which intrinsically eases the process of identifying outliers.\n\nFor reference, both methods are used. Because P-ICP is a bit faster, with not-so-significant\nperformance drawback, it will be the method of choice in refinement of correspondences.\n\n### Weighting of Pairs\n\nIn my implementation, I currently do not use a weighting scheme. But the common approaches involve\n*normal compatibility* (\\f$w_i=n^1_i\\cdot n^2_j\\f$) or assigning lower weights to point pairs with\ngreater distances (\\f$w=1-\\frac{||dist(m_i,s_i)||_2}{dist_{max}}\\f$).\n\n### Rejection of Pairs\n\nThe rejections are done using a dynamic thresholding based on a robust estimate of the standard\ndeviation. In other words, in each iteration, I find the MAD estimate of the Std. Dev. I denote this\nas \\f$mad_i\\f$. I reject the pairs with distances \\f$d_i>\\tau mad_i\\f$. Here \\f$\\tau\\f$ is the threshold of\nrejection and by default set to \\f$3\\f$. The weighting is applied prior to Picky refinement, explained\nin the previous stage.\n\n### Error Metric\n\nA linearization of the point-to-plane error metric, as described in @cite koklimlow, is used. This\nboth speeds up the registration process and improves convergence.\n\n### Minimization\n\nEven though many non-linear optimizers (such as Levenberg-Marquardt) have been proposed, due to the\nlinearization in the previous step, pose estimation reduces to solving a linear system of equations.\nThis is exactly what I do, using cv::solve with the DECOMP_SVD option.\n
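\nTo make the minimization step concrete, the solve call amounts to the following (a sketch; A and b\nstand for the assembled system and are assumed to be filled in by the linearization above):\n\n~~~{cpp}\ncv::Mat A(6, 6, CV_64F);   // system matrix from the point-to-plane linearization\ncv::Mat b(6, 1, CV_64F);   // right-hand side\ncv::Mat x;                 // solution: [alpha, beta, gamma, t_x, t_y, t_z]\ncv::solve(A, b, x, cv::DECOMP_SVD);\n~~~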
\n\n### ICP Algorithm\n\nHaving described the steps above, here I summarize the layout of the ICP algorithm.\n\n#### Efficient ICP Through Point Cloud Pyramids\n\nWhile the up-to-now-proposed variants deal well with some outliers and bad initializations, they\nrequire a significant number of iterations. Yet, a multi-resolution scheme can help reduce the number\nof iterations by allowing the registration to start from a coarse level and propagate to the lower\nand finer levels. Such an approach both improves the performance and enhances the runtime.\n\nThe search is done through multiple levels, in a hierarchical fashion. The registration starts with\na very coarse set of samples of the model. Iteratively, the points are densified and sought. After\neach iteration the previously estimated pose is used as an initial pose and refined with the ICP.\n\n#### Visual Results\n\n##### Results on Synthetic Data\n\nIn all of the results, the pose is initiated by PPF and the rest is left as:\n\\f$[\\theta_x, \\theta_y, \\theta_z, t_x, t_y, t_z]=[0]\\f$\n\n### Source Code for Pose Refinement Using ICP\n\n~~~{cpp}\nICP icp(200, 0.001f, 2.5f, 8);\n// Using the previously declared pc and pcTest\n// This will perform registration for every pose\n// contained in results\nicp.registerModelToScene(pc, pcTest, results);\n\n// results now contain the refined poses\n~~~\n\nResults\n-------\n\nThis section is dedicated to the results of surface matching (point-pair-feature matching and a\nfollowing ICP refinement):\n\n![Several matches of a single frog model using ppf + icp](img/gsoc_forg_matches.jpg)\n\nMatches of different models for the Mian dataset are presented below:\n\n![Matches of different models for Mian dataset](img/snapshot27.jpg)\n\nYou can check out the video on [YouTube here](http://www.youtube.com/watch?v=uFnqLFznuZU).\n\nA Complete Sample\n-----------------\n\n### Parameter Tuning\n\nThe surface matching module treats its parameters relative to the model diameter (the diameter of the\naxis-parallel bounding box), whenever it can. This makes the parameters independent of the model size.\nThis is why both the model and the scene cloud are subsampled such that all points have a minimum distance\nof \\f$RelativeSamplingStep*DimensionRange\\f$, where \\f$DimensionRange\\f$ is the distance along a given\ndimension. All three dimensions are sampled in a similar manner. For example, if\n\\f$RelativeSamplingStep\\f$ is set to 0.05 and the diameter of the model is 1 m (1000 mm), the points sampled\nfrom the object's surface will be approximately 50 mm apart. From another point of view, if the\nsampling RelativeSamplingStep is set to 0.05, at most \\f$20x20x20 = 8000\\f$ model points are generated\n(depending on how the model fills in the volume). Consequently, this results in at most 8000x8000\npairs. In practice, because the models are not uniformly distributed over a rectangular prism, far\nfewer points are to be expected. Decreasing this value results in more model points and thus a more\naccurate representation. However, note that the number of point pair features to be computed now\nincreases quadratically, as the complexity is O(N\\^2). This is especially a concern for 32-bit\nsystems, where large models can easily overshoot the available memory. Typically, values in the\nrange of 0.025 - 0.05 seem adequate for most of the applications, where the default value is 0.03.\n(Note that there is a difference between this parameter and the one presented in @cite drost2010. In\n@cite drost2010 a uniform cuboid is used for quantization and the model diameter is used as the reference for\nsampling. In my implementation, the cuboid is a rectangular prism, and each dimension is quantized\nindependently. 
I do not take the reference from the diameter but along the individual dimensions.)\n\nIt would be very wise to remove the outliers from the model and prepare an ideal model initially. This\nis because the outliers directly affect the relative computations and degrade the matching\naccuracy.\n\nDuring the runtime stage, the scene is again sampled by \\f$RelativeSamplingStep\\f$, as described above.\nHowever, this time only a portion of the scene points are used as reference. This portion is\ncontrolled by the parameter \\f$RelativeSceneSampleStep\\f$, where\n\\f$SceneSampleStep = (int)(1.0/RelativeSceneSampleStep)\\f$. In other words, if the\n\\f$RelativeSceneSampleStep = 1.0/5.0\\f$, the subsampled scene will once again be uniformly sampled to\n1/5 of the number of points. The maximum value of this parameter is 1 and increasing this parameter also\nincreases the stability, but decreases the speed. Again, because of the initial scene-independent\nrelative sampling, fine-tuning this parameter is not a big concern. This would only be an issue when\nthe model shape occupies a volume uniformly, or when the model shape is condensed in a tiny place\nwithin the quantization volume (e.g. the octree representation would have too many empty cells).\n\n\\f$RelativeDistanceStep\\f$ acts as a step of discretization over the hash table. The point pair features\nare quantized to be mapped to the buckets of the hashtable. This discretization involves a\nmultiplication and a casting to the integer. Adjusting RelativeDistanceStep in theory controls the\ncollision rate. Note that more collisions on the hashtable result in less accurate estimations.\nReducing this parameter increases the effect of quantization but starts to assign non-similar point\npairs to the same bins. Increasing it, however, wanes the ability to group similar pairs.\nGenerally, because during the sampling stage, the training model points are selected uniformly with\na distance controlled by RelativeSamplingStep, RelativeDistanceStep is expected to equate to this\nvalue. Yet again, values in the range of 0.025-0.05 are sensible. This time, however, when the model\nis dense, it is not advised to decrease this value. For noisy scenes, the value can be increased to\nimprove the robustness of the matching against noisy points.\n\n*/\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/text/erfilter.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_TEXT_ERFILTER_HPP__\n#define __OPENCV_TEXT_ERFILTER_HPP__\n\n#include \"opencv2/core.hpp\"\n#include <vector>\n#include <deque>\n#include <string>\n\nnamespace cv\n{\nnamespace text\n{\n\n//! @addtogroup text_detect\n//! @{\n\n/** @brief The ERStat structure represents a class-specific Extremal Region (ER).\n\nAn ER is a 4-connected set of pixels with all its grey-level values smaller than the values in its\nouter boundary. A class-specific ER is selected (using a classifier) from all the ER's in the\ncomponent tree of the image. :\n */\nstruct CV_EXPORTS ERStat\n{\npublic:\n    //! Constructor\n    explicit ERStat(int level = 256, int pixel = 0, int x = 0, int y = 0);\n    //! Destructor\n    ~ERStat() { }\n\n    //! seed point and the threshold (max grey-level value)\n    int pixel;\n    int level;\n\n    //! 
incrementally computable features\n    int area;\n    int perimeter;\n    int euler;                 //!< euler number\n    Rect rect;\n    double raw_moments[2];     //!< order 1 raw moments to derive the centroid\n    double central_moments[3]; //!< order 2 central moments to construct the covariance matrix\n    std::deque<int> *crossings;//!< horizontal crossings\n    float med_crossings;       //!< median of the crossings at three different height levels\n\n    //! 2nd stage features\n    float hole_area_ratio;\n    float convex_hull_ratio;\n    float num_inflexion_points;\n\n    // TODO Other features can be added (average color, standard deviation, and such)\n\n\n    // TODO shall we include the pixel list whenever available (i.e. after 2nd stage) ?\n    std::vector<int> *pixels;\n\n    //! probability that the ER belongs to the class we are looking for\n    double probability;\n\n    //! pointers preserving the tree structure of the component tree\n    ERStat* parent;\n    ERStat* child;\n    ERStat* next;\n    ERStat* prev;\n\n    //! whether the region is a local maximum of the probability\n    bool local_maxima;\n    ERStat* max_probability_ancestor;\n    ERStat* min_probability_ancestor;\n};\n\n/** @brief Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithm [Neumann12].\n\nExtracts the component tree (if needed) and filters the extremal regions (ER's) by using a given classifier.\n */\nclass CV_EXPORTS ERFilter : public Algorithm\n{\npublic:\n\n    /** @brief The callback with the classifier is made a class.\n\n    By doing so we hide SVM, Boost, etc. Developers can provide their own classifiers to the\n    ERFilter algorithm.\n     */\n    class CV_EXPORTS Callback\n    {\n    public:\n        virtual ~Callback() { }\n        /** @brief The classifier must return a probability measure for the region.\n\n        @param  stat :   The region to be classified\n         */\n        virtual double eval(const ERStat& stat) = 0; //const = 0; //TODO why cannot use const = 0 here?\n    };\n\n    /** @brief The key method of ERFilter algorithm.\n\n    Takes an image as input and returns the selected regions in a vector of ERStat; only distinctive\n    ERs which correspond to characters are selected by a sequential classifier.\n\n    @param image Single channel image CV_8UC1\n\n    @param regions Output for the 1st stage and Input/Output for the 2nd. The selected Extremal Regions\n    are stored here.\n\n    Extracts the component tree (if needed) and filters the extremal regions (ER's) by using a given\n    classifier.\n     */\n    virtual void run( InputArray image, std::vector<ERStat>& regions ) = 0;\n\n\n    //! 
set/get methods to set the algorithm properties.\n    virtual void setCallback(const Ptr<ERFilter::Callback>& cb) = 0;\n    virtual void setThresholdDelta(int thresholdDelta) = 0;\n    virtual void setMinArea(float minArea) = 0;\n    virtual void setMaxArea(float maxArea) = 0;\n    virtual void setMinProbability(float minProbability) = 0;\n    virtual void setMinProbabilityDiff(float minProbabilityDiff) = 0;\n    virtual void setNonMaxSuppression(bool nonMaxSuppression) = 0;\n    virtual int  getNumRejected() = 0;\n};\n\n\n/** @brief Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm [Neumann12]\n(Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012).\n\n@param  cb :   Callback with the classifier. The default classifier can be implicitly loaded with the function\nloadClassifierNM1, e.g. from file in samples/cpp/trained_classifierNM1.xml\n@param  thresholdDelta :   Threshold step in subsequent thresholds when extracting the component tree\n@param  minArea :   The minimum area (% of image size) allowed for retrieved ER's\n@param  maxArea :   The maximum area (% of image size) allowed for retrieved ER's\n@param  minProbability :   The minimum probability P(er|character) allowed for retrieved ER's\n@param  nonMaxSuppression :   Whether non-maximum suppression is done over the branch probabilities\n@param  minProbabilityDiff :   The minimum probability difference between local maxima and local minima ERs\n\nThe component tree of the image is extracted by a threshold increased step by step from 0 to 255,\nincrementally computable descriptors (aspect_ratio, compactness, number of holes, and number of\nhorizontal crossings) are computed for each ER and used as features for a classifier which estimates\nthe class-conditional probability P(er|character). 
The value of P(er|character) is tracked using the\ninclusion relation of ER across all thresholds and only the ERs which correspond to a local maximum of\nthe probability P(er|character) are selected (if the local maximum of the probability is above a\nglobal limit pmin and the difference between local maximum and local minimum is greater than\nminProbabilityDiff).\n */\nCV_EXPORTS Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb,\n                                                  int thresholdDelta = 1, float minArea = 0.00025,\n                                                  float maxArea = 0.13, float minProbability = 0.4,\n                                                  bool nonMaxSuppression = true,\n                                                  float minProbabilityDiff = 0.1);\n\n/** @brief Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm [Neumann12].\n\n@param  cb :   Callback with the classifier. The default classifier can be implicitly loaded with the function\nloadClassifierNM2, e.g. from file in samples/cpp/trained_classifierNM2.xml\n@param  minProbability :   The minimum probability P(er|character) allowed for retrieved ER's\n\nIn the second stage, the ERs that passed the first stage are classified into character and\nnon-character classes using more informative but also more computationally expensive features. The\nclassifier uses all the features calculated in the first stage and the following additional\nfeatures: hole area ratio, convex hull ratio, and number of outer inflexion points.\n */\nCV_EXPORTS Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb,\n                                                  float minProbability = 0.3);\n\n\n/** @brief Allows implicit loading of the default classifier when creating an ERFilter object.\n\n@param filename The XML or YAML file with the classifier model (e.g. trained_classifierNM1.xml)\n\nReturns a pointer to ERFilter::Callback.\n */\nCV_EXPORTS Ptr<ERFilter::Callback> loadClassifierNM1(const std::string& filename);\n\n/** @brief Allows implicit loading of the default classifier when creating an ERFilter object.\n\n@param filename The XML or YAML file with the classifier model (e.g. trained_classifierNM2.xml)\n\nReturns a pointer to ERFilter::Callback.\n */\nCV_EXPORTS Ptr<ERFilter::Callback> loadClassifierNM2(const std::string& filename);\n\n\n//! computeNMChannels operation modes\nenum { ERFILTER_NM_RGBLGrad,\n       ERFILTER_NM_IHSGrad\n     };\n\n/** @brief Compute the different channels to be processed independently in the N&M algorithm [Neumann12].\n\n@param _src Source image. Must be RGB CV_8UC3.\n\n@param _channels Output vector\<Mat\> where computed channels are stored.\n\n@param _mode Mode of operation. Currently the only available options are:\n**ERFILTER_NM_RGBLGrad** (used by default) and **ERFILTER_NM_IHSGrad**.\n\nIn N&M algorithm, the combination of intensity (I), hue (H), saturation (S), and gradient magnitude\nchannels (Grad) is used in order to obtain high localization recall. This implementation also\nprovides an alternative combination of red (R), green (G), blue (B), lightness (L), and gradient\nmagnitude (Grad).\n */\nCV_EXPORTS void computeNMChannels(InputArray _src, OutputArrayOfArrays _channels, int _mode = ERFILTER_NM_RGBLGrad);\n\n\n\n//! 
text::erGrouping operation modes\nenum erGrouping_Modes {\n\n    /** Exhaustive Search algorithm proposed in [Neumann11] for grouping horizontally aligned text.\n    The algorithm models a verification function for all the possible ER sequences. The\n    verification function for ER pairs consists of a set of threshold-based pairwise rules which\n    compare measurements of two regions (height ratio, centroid angle, and region distance). The\n    verification function for ER triplets creates a word text line estimate using Least\n    Median-Squares fitting for a given triplet and then verifies that the estimate is valid (based\n    on thresholds created during training). Verification functions for sequences larger than 3 are\n    approximated by verifying that the text line parameters of all (sub)sequences of length 3 are\n    consistent.\n    */\n    ERGROUPING_ORIENTATION_HORIZ,\n    /** Text grouping method proposed in [Gomez13][Gomez14] for grouping arbitrarily oriented text. Regions\n    are agglomerated by Single Linkage Clustering in a weighted feature space that combines proximity\n    (x,y coordinates) and similarity measures (color, size, gradient magnitude, stroke width, etc.).\n    SLC provides a dendrogram where each node represents a text group hypothesis. Then the algorithm\n    finds the branches corresponding to text groups by traversing this dendrogram with a stopping rule\n    that combines the output of a rotation invariant text group classifier and a probabilistic measure\n    for hierarchical clustering validity assessment.\n     */\n    ERGROUPING_ORIENTATION_ANY\n};\n\n/** @brief Find groups of Extremal Regions that are organized as text blocks.\n\n@param img Original RGB or Greyscale image from which the regions were extracted.\n\n@param channels Vector of single channel images CV_8UC1 from which the regions were extracted.\n\n@param regions Vector of ER's retrieved from the ERFilter algorithm from each channel.\n\n@param groups The output of the algorithm is stored in this parameter as a set of lists of indexes to\nthe provided regions.\n\n@param groups_rects The output of the algorithm is stored in this parameter as a list of rectangles.\n\n@param method Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ,\nERGROUPING_ORIENTATION_ANY.\n\n@param filename The XML or YAML file with the classifier model (e.g.\nsamples/trained_classifier_erGrouping.xml). Only used when the grouping method is\nERGROUPING_ORIENTATION_ANY.\n\n@param minProbability The minimum probability for accepting a group. 
Only used when the grouping\nmethod is ERGROUPING_ORIENTATION_ANY.\n */\nCV_EXPORTS void erGrouping(InputArray img, InputArrayOfArrays channels,\n                                           std::vector<std::vector<ERStat> > &regions,\n                                           std::vector<std::vector<Vec2i> > &groups,\n                                           std::vector<Rect> &groups_rects,\n                                           int method = ERGROUPING_ORIENTATION_HORIZ,\n                                           const std::string& filename = std::string(),\n                                           float minProbability = 0.5);\n\n/** @brief Converts MSER contours (vector\<Point\>) to ERStat regions.\n\n@param image Source image CV_8UC1 from which the MSERs were extracted.\n\n@param contours Input vector with all the contours (vector\<Point\>).\n\n@param regions Output where the ERStat regions are stored.\n\nIt takes as input the contours provided by the OpenCV MSER feature detector and returns as output\ntwo vectors of ERStats. This is because MSER() output contains both MSER+ and MSER- regions in a\nsingle vector\<Point\>; the function separates them into two different vectors (as if the\nERStats were extracted from two different channels).\n\nAn example of MSERsToERStats in use can be found in the text detection webcam_demo:\n<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>\n */\nCV_EXPORTS void MSERsToERStats(InputArray image, std::vector<std::vector<Point> > &contours,\n                               std::vector<std::vector<ERStat> > &regions);\n\n//! @}\n\n}\n}\n#endif // __OPENCV_TEXT_ERFILTER_HPP__\n"
  },
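A minimal end-to-end sketch of the two-stage pipeline declared in erfilter.hpp above, modeled on the textdetection sample it links to. The input path, the classifier file locations, and the filter parameter values are illustrative assumptions, not fixed requirements:

// Sketch only: the classifier XML files normally ship with the opencv_contrib
// text module samples; "scene.jpg" is a hypothetical input image.
#include "opencv2/text.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>

int main()
{
    using namespace cv;
    using namespace cv::text;

    Mat src = imread("scene.jpg");                      // RGB input (assumption)

    std::vector<Mat> channels;                          // R, G, B, L, Grad by default
    computeNMChannels(src, channels);

    // 1st and 2nd stage filters with illustrative parameter values.
    Ptr<ERFilter> er1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),
                                          16, 0.00015f, 0.13f, 0.2f, true, 0.1f);
    Ptr<ERFilter> er2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"), 0.5f);

    std::vector<std::vector<ERStat> > regions(channels.size());
    for (size_t c = 0; c < channels.size(); c++)
    {
        er1->run(channels[c], regions[c]);              // 1st stage fills regions[c]
        er2->run(channels[c], regions[c]);              // 2nd stage filters it in place
    }

    std::vector<std::vector<Vec2i> > groups;
    std::vector<Rect> boxes;
    erGrouping(src, channels, regions, groups, boxes);  // horizontal grouping by default

    for (size_t i = 0; i < boxes.size(); i++)
        rectangle(src, boxes[i], Scalar(0, 255, 0));    // draw detected text blocks
    return 0;
}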
  {
    "path": "src/3rdparty/opencv/include/opencv2/text/ocr.hpp",
    "content": "/*M//////////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_TEXT_OCR_HPP__\n#define __OPENCV_TEXT_OCR_HPP__\n\n#include <vector>\n#include <string>\n\nnamespace cv\n{\nnamespace text\n{\n\n//! @addtogroup text_recognize\n//! 
@{\n\nenum\n{\n    OCR_LEVEL_WORD,\n    OCR_LEVEL_TEXTLINE\n};\n\n// Base class BaseOCR declares a common API used in a typical text recognition scenario.\nclass CV_EXPORTS_W BaseOCR\n{\npublic:\n    virtual ~BaseOCR() {};\n    virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0) = 0;\n    virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0) = 0;\n};\n\n/** @brief OCRTesseract class provides an interface with the tesseract-ocr API (v3.02.02) in C++.\n\nNotice that it is compiled only when tesseract-ocr is correctly installed.\n\n@note\n   -   (C++) An example of OCRTesseract recognition combined with scene text detection can be found\n        at the end_to_end_recognition demo:\n        <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/end_to_end_recognition.cpp>\n    -   (C++) Another example of OCRTesseract recognition combined with scene text detection can be\n        found at the webcam_demo:\n        <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>\n */\nclass CV_EXPORTS_W OCRTesseract : public BaseOCR\n{\npublic:\n    /** @brief Recognize text using the tesseract-ocr API.\n\n    Takes an image as input and returns the recognized text in the output_text parameter. Optionally\n    also provides the Rects for individual text elements found (e.g. words), and the list of those\n    text elements with their confidence values.\n\n    @param image Input image CV_8UC1 or CV_8UC3\n    @param output_text Output text of the tesseract-ocr.\n    @param component_rects If provided the method will output a list of Rects for the individual\n    text elements found (e.g. words or text lines).\n    @param component_texts If provided the method will output a list of text strings for the\n    recognition of individual text elements found (e.g. words or text lines).\n    @param component_confidences If provided the method will output a list of confidence values\n    for the recognition of individual text elements found (e.g. words or text lines).\n    @param component_level OCR_LEVEL_WORD (by default), or OCR_LEVEL_TEXTLINE.\n     */\n    virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0);\n\n    virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0);\n\n    // aliases for scripting\n    CV_WRAP String run(InputArray image, int min_confidence, int component_level=0);\n\n    CV_WRAP String run(InputArray image, InputArray mask, int min_confidence, int component_level=0);\n\n    CV_WRAP virtual void setWhiteList(const String& char_whitelist) = 0;\n\n\n    /** @brief Creates an instance of the OCRTesseract class. 
Initializes Tesseract.\n\n    @param datapath the name of the parent directory of tessdata ending with \"/\", or NULL to use the\n    system's default directory.\n    @param language an ISO 639-3 code; NULL defaults to \"eng\".\n    @param char_whitelist specifies the list of characters used for recognition. NULL defaults to\n    \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\".\n    @param oem tesseract-ocr offers different OCR Engine Modes (OEM); by default\n    tesseract::OEM_DEFAULT is used. See the tesseract-ocr API documentation for other possible\n    values.\n    @param psmode tesseract-ocr offers different Page Segmentation Modes (PSM); by default tesseract::PSM_AUTO\n    (fully automatic layout analysis) is used. See the tesseract-ocr API documentation for other\n    possible values.\n     */\n    CV_WRAP static Ptr<OCRTesseract> create(const char* datapath=NULL, const char* language=NULL,\n                                    const char* char_whitelist=NULL, int oem=3, int psmode=3);\n};\n\n\n/* OCR HMM Decoder */\n\nenum decoder_mode\n{\n    OCR_DECODER_VITERBI = 0 // Other algorithms may be added\n};\n\n/** @brief OCRHMMDecoder class provides an interface for OCR using Hidden Markov Models.\n\n@note\n   -   (C++) An example on using OCRHMMDecoder recognition combined with scene text detection can\n        be found at the webcam_demo sample:\n        <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>\n */\nclass CV_EXPORTS_W OCRHMMDecoder : public BaseOCR\n{\npublic:\n\n    /** @brief The character classifier callback is wrapped in a class.\n\n    This hides the feature extractor and the classifier itself, so developers can write\n    their own OCR code.\n\n    The default character classifier and feature extractor can be loaded using the utility function\n    loadOCRHMMClassifierNM and KNN model provided in\n    <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/OCRHMM_knn_model_data.xml.gz>.\n     */\n    class CV_EXPORTS_W ClassifierCallback\n    {\n    public:\n        virtual ~ClassifierCallback() { }\n        /** @brief The character classifier must return a (ranked) list of class id(s)\n\n        @param image Input image CV_8UC1 or CV_8UC3 with a single letter.\n        @param out_class The classifier returns the character class categorical label, or list of\n        class labels, to which the input image corresponds.\n        @param out_confidence The classifier returns the probability of the input image\n        corresponding to each class in out_class.\n         */\n        virtual void eval( InputArray image, std::vector<int>& out_class, std::vector<double>& out_confidence);\n    };\n\npublic:\n    /** @brief Recognize text using HMM.\n\n    Takes a binary image as input and returns the recognized text in the output_text parameter. Optionally\n    also provides the Rects for individual text elements found (e.g. words), and the list of those\n    text elements with their confidence values.\n\n    @param image Input binary image CV_8UC1 with a single text line (or word).\n\n    @param output_text Output text. Most likely character sequence found by the HMM decoder.\n\n    @param component_rects If provided the method will output a list of Rects for the individual\n    text elements found (e.g. words).\n\n    @param component_texts If provided the method will output a list of text strings for the\n    recognition of individual text elements found (e.g. 
words).\n\n    @param component_confidences If provided the method will output a list of confidence values\n    for the recognition of individual text elements found (e.g. words).\n\n    @param component_level Only OCR_LEVEL_WORD is supported.\n     */\n    virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0);\n\n    /** @brief Recognize text using HMM.\n\n    Takes an image and a mask (where each connected component corresponds to a segmented character)\n    as input and returns the recognized text in the output_text parameter. Optionally\n    also provides the Rects for individual text elements found (e.g. words), and the list of those\n    text elements with their confidence values.\n\n    @param image Input image CV_8UC1 or CV_8UC3 with a single text line (or word).\n    @param mask Input binary image CV_8UC1 of the same size as the input image. Each connected component in mask corresponds to a segmented character in the input image.\n\n    @param output_text Output text. Most likely character sequence found by the HMM decoder.\n\n    @param component_rects If provided the method will output a list of Rects for the individual\n    text elements found (e.g. words).\n\n    @param component_texts If provided the method will output a list of text strings for the\n    recognition of individual text elements found (e.g. words).\n\n    @param component_confidences If provided the method will output a list of confidence values\n    for the recognition of individual text elements found (e.g. words).\n\n    @param component_level Only OCR_LEVEL_WORD is supported.\n     */\n    virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0);\n\n    // aliases for scripting\n    CV_WRAP String run(InputArray image, int min_confidence, int component_level=0);\n\n    CV_WRAP String run(InputArray image, InputArray mask, int min_confidence, int component_level=0);\n\n    /** @brief Creates an instance of the OCRHMMDecoder class. Initializes HMMDecoder.\n\n    @param classifier The character classifier with built-in feature extractor.\n\n    @param vocabulary The language vocabulary (chars for ASCII English text). vocabulary.size()\n    must be equal to the number of classes of the classifier.\n\n    @param transition_probabilities_table Table with transition probabilities between character\n    pairs. cols == rows == vocabulary.size().\n\n    @param emission_probabilities_table Table with observation emission probabilities. cols ==\n    rows == vocabulary.size().\n\n    @param mode HMM Decoding algorithm. 
Only OCR_DECODER_VITERBI is available for the moment\n    (<http://en.wikipedia.org/wiki/Viterbi_algorithm>).\n     */\n    static Ptr<OCRHMMDecoder> create(const Ptr<OCRHMMDecoder::ClassifierCallback> classifier,// The character classifier with built-in feature extractor\n                                     const std::string& vocabulary,                    // The language vocabulary (chars for ASCII English text)\n                                                                                       //     size() must be equal to the number of classes\n                                     InputArray transition_probabilities_table,        // Table with transition probabilities between character pairs\n                                                                                       //     cols == rows == vocabulary.size()\n                                     InputArray emission_probabilities_table,          // Table with observation emission probabilities\n                                                                                       //     cols == rows == vocabulary.size()\n                                     decoder_mode mode = OCR_DECODER_VITERBI);         // HMM Decoding algorithm (only Viterbi for the moment)\n\n    CV_WRAP static Ptr<OCRHMMDecoder> create(const Ptr<OCRHMMDecoder::ClassifierCallback> classifier,// The character classifier with built-in feature extractor\n                                     const String& vocabulary,                    // The language vocabulary (chars for ASCII English text)\n                                                                                       //     size() must be equal to the number of classes\n                                     InputArray transition_probabilities_table,        // Table with transition probabilities between character pairs\n                                                                                       //     cols == rows == vocabulary.size()\n                                     InputArray emission_probabilities_table,          // Table with observation emission probabilities\n                                                                                       //     cols == rows == vocabulary.size()\n                                     int mode = OCR_DECODER_VITERBI);         // HMM Decoding algorithm (only Viterbi for the moment)\n\nprotected:\n\n    Ptr<OCRHMMDecoder::ClassifierCallback> classifier;\n    std::string vocabulary;\n    Mat transition_p;\n    Mat emission_p;\n    decoder_mode mode;\n};\n\n/** @brief Allows implicit loading of the default character classifier when creating an OCRHMMDecoder object.\n\n@param filename The XML or YAML file with the classifier model (e.g. OCRHMM_knn_model_data.xml)\n\nThe KNN default classifier is based on the scene text recognition method proposed by Lukás Neumann &\nJiri Matas in [Neumann11b]. Basically, the region (contour) in the input image is normalized to a\nfixed size, while retaining the centroid and aspect ratio, in order to extract a feature vector\nbased on gradient orientations along the chain-code of its perimeter. 
Then, the region is classified\nusing a KNN model trained with synthetic data of rendered characters with different standard font\ntypes.\n */\n\nCV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierNM(const String& filename);\n\n/** @brief Allows implicit loading of the default character classifier when creating an OCRHMMDecoder object.\n\n@param filename The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)\n\nThe CNN default classifier is based on the scene text recognition method proposed by Adam Coates &\nAndrew Ng in [Coates11a]. The character classifier consists of a Single Layer Convolutional Neural Network and\na linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions\nat each window location.\n */\nCV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierCNN(const String& filename);\n\n/** @brief Utility function to create a tailored language model transitions table from a given list of words (lexicon).\n *\n * @param vocabulary The language vocabulary (chars for ASCII English text).\n *\n * @param lexicon The list of words that are expected to be found in a particular image.\n *\n * @param transition_probabilities_table Output table with transition probabilities between character pairs. cols == rows == vocabulary.size().\n *\n * The function calculates frequency statistics of character pairs from the given lexicon and fills the output transition_probabilities_table with them. The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.\n * @note\n *    -   (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from the ispell list of 42869 English words):\n *            <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>\n **/\nCV_EXPORTS void createOCRHMMTransitionsTable(std::string& vocabulary, std::vector<std::string>& lexicon, OutputArray transition_probabilities_table);\n\nCV_EXPORTS_W Mat createOCRHMMTransitionsTable(const String& vocabulary, std::vector<cv::String>& lexicon);\n\n\n/* OCR BeamSearch Decoder */\n\n/** @brief OCRBeamSearchDecoder class provides an interface for OCR using Beam Search algorithm.\n\n@note\n   -   (C++) An example on using OCRBeamSearchDecoder recognition combined with scene text detection can\n        be found at the demo sample:\n        <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/word_recognition.cpp>\n */\nclass CV_EXPORTS_W OCRBeamSearchDecoder : public BaseOCR\n{\npublic:\n\n    /** @brief The character classifier callback is wrapped in a class.\n\n    This hides the feature extractor and the classifier itself, so developers can write\n    their own OCR code.\n\n    The default character classifier and feature extractor can be loaded using the utility function\n    loadOCRBeamSearchClassifierCNN with all its parameters provided in\n    <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/OCRBeamSearch_CNN_model_data.xml.gz>.\n     */\n    class CV_EXPORTS_W ClassifierCallback\n    {\n    public:\n        virtual ~ClassifierCallback() { }\n        /** @brief The character classifier must return a (ranked) list of class id(s)\n\n        @param image Input image CV_8UC1 or CV_8UC3 with a single letter.\n        @param recognition_probabilities For each 
of the N characters found the classifier returns a list with\n        class probabilities for each class.\n        @param oversegmentation The classifier returns a list of N+1 character locations' x-coordinates,\n        including 0 as start-sequence location.\n         */\n        virtual void eval( InputArray image, std::vector< std::vector<double> >& recognition_probabilities, std::vector<int>& oversegmentation );\n\n        int getWindowSize() {return 0;}\n        int getStepSize() {return 0;}\n    };\n\npublic:\n    /** @brief Recognize text using Beam Search.\n\n    Takes an image as input and returns the recognized text in the output_text parameter. Optionally\n    also provides the Rects for individual text elements found (e.g. words), and the list of those\n    text elements with their confidence values.\n\n    @param image Input binary image CV_8UC1 with a single text line (or word).\n\n    @param output_text Output text. Most likely character sequence found by the decoder.\n\n    @param component_rects If provided the method will output a list of Rects for the individual\n    text elements found (e.g. words).\n\n    @param component_texts If provided the method will output a list of text strings for the\n    recognition of individual text elements found (e.g. words).\n\n    @param component_confidences If provided the method will output a list of confidence values\n    for the recognition of individual text elements found (e.g. words).\n\n    @param component_level Only OCR_LEVEL_WORD is supported.\n     */\n    virtual void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0);\n\n    virtual void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,\n                     std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,\n                     int component_level=0);\n\n    // aliases for scripting\n    CV_WRAP String run(InputArray image, int min_confidence, int component_level=0);\n\n    CV_WRAP String run(InputArray image, InputArray mask, int min_confidence, int component_level=0);\n\n    /** @brief Creates an instance of the OCRBeamSearchDecoder class. Initializes the decoder.\n\n    @param classifier The character classifier with built-in feature extractor.\n\n    @param vocabulary The language vocabulary (chars for ASCII English text). vocabulary.size()\n    must be equal to the number of classes of the classifier.\n\n    @param transition_probabilities_table Table with transition probabilities between character\n    pairs. cols == rows == vocabulary.size().\n\n    @param emission_probabilities_table Table with observation emission probabilities. cols ==\n    rows == vocabulary.size().\n\n    @param mode HMM Decoding algorithm. 
Only OCR_DECODER_VITERBI is available for the moment\n    (<http://en.wikipedia.org/wiki/Viterbi_algorithm>).\n\n    @param beam_size Size of the beam in Beam Search algorithm.\n     */\n    static Ptr<OCRBeamSearchDecoder> create(const Ptr<OCRBeamSearchDecoder::ClassifierCallback> classifier,// The character classifier with built-in feature extractor\n                                     const std::string& vocabulary,                    // The language vocabulary (chars for ASCII English text)\n                                                                                       //     size() must be equal to the number of classes\n                                     InputArray transition_probabilities_table,        // Table with transition probabilities between character pairs\n                                                                                       //     cols == rows == vocabulary.size()\n                                     InputArray emission_probabilities_table,          // Table with observation emission probabilities\n                                                                                       //     cols == rows == vocabulary.size()\n                                     decoder_mode mode = OCR_DECODER_VITERBI,          // HMM Decoding algorithm (only Viterbi for the moment)\n                                     int beam_size = 500);                              // Size of the beam in Beam Search algorithm\n\n    CV_WRAP static Ptr<OCRBeamSearchDecoder> create(const Ptr<OCRBeamSearchDecoder::ClassifierCallback> classifier, // The character classifier with built-in feature extractor\n                                     const String& vocabulary,                    // The language vocabulary (chars for ASCII English text)\n                                                                                       //     size() must be equal to the number of classes\n                                     InputArray transition_probabilities_table,        // Table with transition probabilities between character pairs\n                                                                                       //     cols == rows == vocabulary.size()\n                                     InputArray emission_probabilities_table,          // Table with observation emission probabilities\n                                                                                       //     cols == rows == vocabulary.size()\n                                     int mode = OCR_DECODER_VITERBI,          // HMM Decoding algorithm (only Viterbi for the moment)\n                                     int beam_size = 500);                              // Size of the beam in Beam Search algorithm\n\nprotected:\n\n    Ptr<OCRBeamSearchDecoder::ClassifierCallback> classifier;\n    std::string vocabulary;\n    Mat transition_p;\n    Mat emission_p;\n    decoder_mode mode;\n    int beam_size;\n};\n\n/** @brief Allows implicit loading of the default character classifier when creating an OCRBeamSearchDecoder object.\n\n@param filename The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)\n\nThe CNN default classifier is based on the scene text recognition method proposed by Adam Coates &\nAndrew Ng in [Coates11a]. The character classifier consists of a Single Layer Convolutional Neural Network and\na linear classifier. 
It is applied to the input image in a sliding window fashion, providing a set of recognitions\nat each window location.\n */\n\nCV_EXPORTS_W Ptr<OCRBeamSearchDecoder::ClassifierCallback> loadOCRBeamSearchClassifierCNN(const String& filename);\n\n//! @}\n\n}\n}\n#endif // __OPENCV_TEXT_OCR_HPP__\n"
  },
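For completeness, a minimal sketch of driving the OCRTesseract wrapper declared in ocr.hpp above; it only works when tesseract-ocr is installed, and "word.png" is a hypothetical input image:

#include "opencv2/text.hpp"
#include "opencv2/imgcodecs.hpp"
#include <iostream>
#include <string>
#include <vector>

int main()
{
    cv::Mat word = cv::imread("word.png");   // hypothetical cropped text image

    // Default datapath, language ("eng"), whitelist, and OEM/PSM modes.
    cv::Ptr<cv::text::OCRTesseract> ocr = cv::text::OCRTesseract::create();

    std::string text;
    std::vector<cv::Rect> rects;
    std::vector<std::string> words;
    std::vector<float> confidences;
    ocr->run(word, text, &rects, &words, &confidences, cv::text::OCR_LEVEL_WORD);

    std::cout << "recognized: " << text << std::endl;
    return 0;
}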
  {
    "path": "src/3rdparty/opencv/include/opencv2/text.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_TEXT_HPP__\n#define __OPENCV_TEXT_HPP__\n\n#include \"opencv2/text/erfilter.hpp\"\n#include \"opencv2/text/ocr.hpp\"\n\n/** @defgroup text Scene Text Detection and Recognition\n\nThe opencv_text module provides different algorithms for text detection and recognition in natural\nscene images.\n\n  @{\n    @defgroup text_detect Scene Text Detection\n\nClass-specific Extremal Regions for Scene Text Detection\n--------------------------------------------------------\n\nThe scene text detection algorithm described below has been initially proposed by Lukás Neumann &\nJiri Matas [Neumann12]. The main idea behind Class-specific Extremal Regions is similar to the MSER\nin that suitable Extremal Regions (ERs) are selected from the whole component tree of the image.\nHowever, this technique differs from MSER in that selection of suitable ERs is done by a sequential\nclassifier trained for character detection, i.e. dropping the stability requirement of MSERs and\nselecting class-specific (not necessarily stable) regions.\n\nThe component tree of an image is constructed by thresholding by an increasing value step-by-step\nfrom 0 to 255 and then linking the obtained connected components from successive levels in a\nhierarchy by their inclusion relation:\n\n![image](pics/component_tree.png)\n\nThe component tree may conatain a huge number of regions even for a very simple image as shown in\nthe previous image. This number can easily reach the order of 1 x 10\\^6 regions for an average 1\nMegapixel image. 
In order to efficiently select suitable regions among all the ERs the algorithm\nmakes use of a sequential classifier with two differentiated stages.\n\nIn the first stage incrementally computable descriptors (area, perimeter, bounding box, and euler\nnumber) are computed (in O(1)) for each region r and used as features for a classifier which\nestimates the class-conditional probability p(r|character). Only the ERs which correspond to a local\nmaximum of the probability p(r|character) are selected (if their probability is above a global limit\np_min and the difference between local maximum and local minimum is greater than a delta_min\nvalue).\n\nIn the second stage, the ERs that passed the first stage are classified into character and\nnon-character classes using more informative but also more computationally expensive features (hole\narea ratio, convex hull ratio, and the number of outer boundary inflexion points).\n\nThis ER filtering process is done in different single-channel projections of the input image in\norder to increase the character localization recall.\n\nAfter the ER filtering is done on each input channel, character candidates must be grouped in\nhigh-level text blocks (i.e. words, text lines, paragraphs, ...). The opencv_text module implements\ntwo different grouping algorithms: the Exhaustive Search algorithm proposed in [Neumann11] for\ngrouping horizontally aligned text, and the method proposed by Lluis Gomez and Dimosthenis Karatzas\nin [Gomez13][Gomez14] for grouping arbitrarily oriented text (see erGrouping).\n\nTo see the text detector at work, have a look at the textdetection demo:\n<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/textdetection.cpp>\n\n    @defgroup text_recognize Scene Text Recognition\n  @}\n*/\n\n#endif\n"
  },
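The overview in text.hpp notes that ER selection generalizes MSER; the MSERsToERStats helper declared in erfilter.hpp is the bridge when starting from OpenCV's stock MSER detector instead. A small sketch (MSER::create/detectRegions are the standard features2d API; the image path is an assumption):

#include "opencv2/text.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("scene.jpg", cv::IMREAD_GRAYSCALE);

    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Rect> bboxes;
    cv::MSER::create()->detectRegions(gray, contours, bboxes);

    // Splits MSER+ and MSER- into two ERStat "channels", ready for erGrouping().
    std::vector<std::vector<cv::text::ERStat> > regions;
    cv::text::MSERsToERStats(gray, contours, regions);
    return 0;
}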
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking/feature.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_FEATURE_HPP__\n#define __OPENCV_FEATURE_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n#include <iostream>\n#include <string>\n#include <time.h>\n\n/*\n * TODO This implementation is based on apps/traincascade/\n * TODO Changed CvHaarEvaluator based on ADABOOSTING implementation (Grabner et al.)\n */\n\nnamespace cv\n{\n\n//! @addtogroup tracking\n//! 
@{\n\n#define FEATURES \"features\"\n\n#define CC_FEATURES       FEATURES\n#define CC_FEATURE_PARAMS \"featureParams\"\n#define CC_MAX_CAT_COUNT  \"maxCatCount\"\n#define CC_FEATURE_SIZE   \"featSize\"\n#define CC_NUM_FEATURES   \"numFeat\"\n#define CC_ISINTEGRAL \"isIntegral\"\n#define CC_RECTS       \"rects\"\n#define CC_TILTED      \"tilted\"\n#define CC_RECT \"rect\"\n\n#define LBPF_NAME \"lbpFeatureParams\"\n#define HOGF_NAME \"HOGFeatureParams\"\n#define HFP_NAME \"haarFeatureParams\"\n\n#define CV_HAAR_FEATURE_MAX 3\n#define N_BINS 9\n#define N_CELLS 4\n\n#define CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step )                      \\\n    /* (x, y) */                                                          \\\n    (p0) = (rect).x + (step) * (rect).y;                                  \\\n    /* (x + w, y) */                                                      \\\n    (p1) = (rect).x + (rect).width + (step) * (rect).y;                   \\\n    /* (x, y + h) */                                                      \\\n    (p2) = (rect).x + (step) * ((rect).y + (rect).height);                \\\n    /* (x + w, y + h) */                                                  \\\n    (p3) = (rect).x + (rect).width + (step) * ((rect).y + (rect).height);\n\n#define CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step )                   \\\n    /* (x, y) */                                                          \\\n    (p0) = (rect).x + (step) * (rect).y;                                  \\\n    /* (x - h, y + h) */                                                  \\\n    (p1) = (rect).x - (rect).height + (step) * ((rect).y + (rect).height);\\\n    /* (x + w, y + w) */                                                  \\\n    (p2) = (rect).x + (rect).width + (step) * ((rect).y + (rect).width);  \\\n    /* (x + w - h, y + w + h) */                                          \\\n    (p3) = (rect).x + (rect).width - (rect).height                        \\\n           + (step) * ((rect).y + (rect).width + (rect).height);\n\nfloat calcNormFactor( const Mat& sum, const Mat& sqSum );\n\ntemplate<class Feature>\nvoid _writeFeatures( const std::vector<Feature>& features, FileStorage &fs, const Mat& featureMap )\n{\n  fs << FEATURES << \"[\";\n  const Mat_<int>& featureMap_ = (const Mat_<int>&) featureMap;\n  for ( int fi = 0; fi < featureMap.cols; fi++ )\n    if( featureMap_( 0, fi ) >= 0 )\n    {\n      fs << \"{\";\n      features[fi].write( fs );\n      fs << \"}\";\n    }\n  fs << \"]\";\n}\n\nclass CvParams\n{\n public:\n  CvParams();\n  virtual ~CvParams()\n  {\n  }\n  // from|to file\n  virtual void write( FileStorage &fs ) const = 0;\n  virtual bool read( const FileNode &node ) = 0;\n  // from|to screen\n  virtual void printDefaults() const;\n  virtual void printAttrs() const;\n  virtual bool scanAttr( const std::string prmName, const std::string val );\n  std::string name;\n};\n\nclass CvFeatureParams : public CvParams\n{\n public:\n  enum\n  {\n    HAAR = 0,\n    LBP = 1,\n    HOG = 2\n  };\n  CvFeatureParams();\n  virtual void init( const CvFeatureParams& fp );\n  virtual void write( FileStorage &fs ) const;\n  virtual bool read( const FileNode &node );\n  static Ptr<CvFeatureParams> create( int featureType );\n  int maxCatCount;  // 0 in case of numerical features\n  int featSize;  // 1 in case of simple features (HAAR, LBP) and N_BINS(9)*N_CELLS(4) in case of Dalal's HOG features\n  int numFeatures;\n};\n\nclass CvFeatureEvaluator\n{\n public:\n  virtual ~CvFeatureEvaluator()\n  {\n  }\n  virtual void 
init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize );\n  virtual void setImage( const Mat& img, uchar clsLabel, int idx );\n  virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const = 0;\n  virtual float operator()( int featureIdx, int sampleIdx ) = 0;\n  static Ptr<CvFeatureEvaluator> create( int type );\n\n  int getNumFeatures() const\n  {\n    return numFeatures;\n  }\n  int getMaxCatCount() const\n  {\n    return featureParams->maxCatCount;\n  }\n  int getFeatureSize() const\n  {\n    return featureParams->featSize;\n  }\n  const Mat& getCls() const\n  {\n    return cls;\n  }\n  float getCls( int si ) const\n  {\n    return cls.at<float>( si, 0 );\n  }\n protected:\n  virtual void generateFeatures() = 0;\n\n  int npos, nneg;\n  int numFeatures;\n  Size winSize;\n  CvFeatureParams *featureParams;\n  Mat cls;\n};\n\nclass CvHaarFeatureParams : public CvFeatureParams\n{\n public:\n\n  CvHaarFeatureParams();\n\n  virtual void init( const CvFeatureParams& fp );\n  virtual void write( FileStorage &fs ) const;\n  virtual bool read( const FileNode &node );\n\n  virtual void printDefaults() const;\n  virtual void printAttrs() const;\n  virtual bool scanAttr( const std::string prm, const std::string val );\n\n  bool isIntegral;\n};\n\nclass CvHaarEvaluator : public CvFeatureEvaluator\n{\n public:\n\n  class FeatureHaar\n  {\n\n   public:\n\n    FeatureHaar( Size patchSize );\n    bool eval( const Mat& image, Rect ROI, float* result ) const;\n    int getNumAreas();\n    const std::vector<float>& getWeights() const;\n    const std::vector<Rect>& getAreas() const;\n    void write( FileStorage ) const\n    {\n    }\n    ;\n    float getInitMean() const;\n    float getInitSigma() const;\n\n   private:\n    int m_type;\n    int m_numAreas;\n    std::vector<float> m_weights;\n    float m_initMean;\n    float m_initSigma;\n    void generateRandomFeature( Size imageSize );\n    float getSum( const Mat& image, Rect imgROI ) const;\n    std::vector<Rect> m_areas;  // areas within the patch over which to compute the feature\n    cv::Size m_initSize;  // size of the patch used during training\n    cv::Size m_curSize;  // size of the patches currently under investigation\n    float m_scaleFactorHeight;  // scaling factor in vertical direction\n    float m_scaleFactorWidth;  // scaling factor in horizontal direction\n    std::vector<Rect> m_scaleAreas;  // areas after scaling\n    std::vector<float> m_scaleWeights;  // weights after scaling\n\n  };\n\n  virtual void init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize );\n  virtual void setImage( const Mat& img, uchar clsLabel = 0, int idx = 1 );\n  virtual float operator()( int featureIdx, int sampleIdx );\n  virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const;\n  void writeFeature( FileStorage &fs ) const;  // for old file format\n  const std::vector<CvHaarEvaluator::FeatureHaar>& getFeatures() const;\n  inline CvHaarEvaluator::FeatureHaar& getFeatures( int idx )\n  {\n    return features[idx];\n  }\n  void setWinSize( Size patchSize );\n  Size setWinSize() const;\n  virtual void generateFeatures();\n\n  /**\n   * TODO new method\n   * \\brief Overload the original generateFeatures in order to limit the number of the features\n   * @param numFeatures Number of the features\n   */\n\n  virtual void generateFeatures( int numFeatures );\n\n protected:\n  bool isIntegral;\n\n  /* TODO Added from MIL implementation */\n  Mat _ii_img;\n  void compute_integral( const 
cv::Mat & img, std::vector<cv::Mat_<float> > & ii_imgs )\n  {\n    Mat ii_img;\n    integral( img, ii_img, CV_32F );\n    split( ii_img, ii_imgs );\n  }\n\n  std::vector<FeatureHaar> features;\n  Mat sum; /* sum images (each row represents image) */\n};\n\nstruct CvHOGFeatureParams : public CvFeatureParams\n{\n  CvHOGFeatureParams();\n};\n\nclass CvHOGEvaluator : public CvFeatureEvaluator\n{\n public:\n  virtual ~CvHOGEvaluator()\n  {\n  }\n  virtual void init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize );\n  virtual void setImage( const Mat& img, uchar clsLabel, int idx );\n  virtual float operator()( int varIdx, int sampleIdx );\n  virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const;\n protected:\n  virtual void generateFeatures();\n  virtual void integralHistogram( const Mat &img, std::vector<Mat> &histogram, Mat &norm, int nbins ) const;\n  class Feature\n  {\n   public:\n    Feature();\n    Feature( int offset, int x, int y, int cellW, int cellH );\n    float calc( const std::vector<Mat> &_hists, const Mat &_normSum, size_t y, int featComponent ) const;\n    void write( FileStorage &fs ) const;\n    void write( FileStorage &fs, int varIdx ) const;\n\n    Rect rect[N_CELLS];  //cells\n\n    struct\n    {\n      int p0, p1, p2, p3;\n    } fastRect[N_CELLS];\n  };\n  std::vector<Feature> features;\n\n  Mat normSum;  //for normalization calculation (L1 or L2)\n  std::vector<Mat> hist;\n};\n\ninline float CvHOGEvaluator::operator()( int varIdx, int sampleIdx )\n{\n  int featureIdx = varIdx / ( N_BINS * N_CELLS );\n  int componentIdx = varIdx % ( N_BINS * N_CELLS );\n  //return features[featureIdx].calc( hist, sampleIdx, componentIdx);\n  return features[featureIdx].calc( hist, normSum, sampleIdx, componentIdx );\n}\n\ninline float CvHOGEvaluator::Feature::calc( const std::vector<Mat>& _hists, const Mat& _normSum, size_t y, int featComponent ) const\n{\n  float normFactor;\n  float res;\n\n  int binIdx = featComponent % N_BINS;\n  int cellIdx = featComponent / N_BINS;\n\n  const float *phist = _hists[binIdx].ptr<float>( (int) y );\n  res = phist[fastRect[cellIdx].p0] - phist[fastRect[cellIdx].p1] - phist[fastRect[cellIdx].p2] + phist[fastRect[cellIdx].p3];\n\n  const float *pnormSum = _normSum.ptr<float>( (int) y );\n  normFactor = (float) ( pnormSum[fastRect[0].p0] - pnormSum[fastRect[1].p1] - pnormSum[fastRect[2].p2] + pnormSum[fastRect[3].p3] );\n  res = ( res > 0.001f ) ? 
( res / ( normFactor + 0.001f ) ) : 0.f;  //for cutting negative values, which appear due to floating-point precision\n\n  return res;\n}\n\nstruct CvLBPFeatureParams : CvFeatureParams\n{\n  CvLBPFeatureParams();\n\n};\n\nclass CvLBPEvaluator : public CvFeatureEvaluator\n{\n public:\n  virtual ~CvLBPEvaluator()\n  {\n  }\n  virtual void init( const CvFeatureParams *_featureParams, int _maxSampleCount, Size _winSize );\n  virtual void setImage( const Mat& img, uchar clsLabel, int idx );\n  virtual float operator()( int featureIdx, int sampleIdx )\n  {\n    return (float) features[featureIdx].calc( sum, sampleIdx );\n  }\n  virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const;\n protected:\n  virtual void generateFeatures();\n\n  class Feature\n  {\n   public:\n    Feature();\n    Feature( int offset, int x, int y, int _block_w, int _block_h );\n    uchar calc( const Mat& _sum, size_t y ) const;\n    void write( FileStorage &fs ) const;\n\n    Rect rect;\n    int p[16];\n  };\n  std::vector<Feature> features;\n\n  Mat sum;\n};\n\ninline uchar CvLBPEvaluator::Feature::calc( const Mat &_sum, size_t y ) const\n{\n  const int* psum = _sum.ptr<int>( (int) y );\n  int cval = psum[p[5]] - psum[p[6]] - psum[p[9]] + psum[p[10]];\n\n  return (uchar) ( ( psum[p[0]] - psum[p[1]] - psum[p[4]] + psum[p[5]] >= cval ? 128 : 0 ) |   // 0\n      ( psum[p[1]] - psum[p[2]] - psum[p[5]] + psum[p[6]] >= cval ? 64 : 0 ) |    // 1\n      ( psum[p[2]] - psum[p[3]] - psum[p[6]] + psum[p[7]] >= cval ? 32 : 0 ) |    // 2\n      ( psum[p[6]] - psum[p[7]] - psum[p[10]] + psum[p[11]] >= cval ? 16 : 0 ) |  // 5\n      ( psum[p[10]] - psum[p[11]] - psum[p[14]] + psum[p[15]] >= cval ? 8 : 0 ) |  // 8\n      ( psum[p[9]] - psum[p[10]] - psum[p[13]] + psum[p[14]] >= cval ? 4 : 0 ) |  // 7\n      ( psum[p[8]] - psum[p[9]] - psum[p[12]] + psum[p[13]] >= cval ? 2 : 0 ) |   // 6\n      ( psum[p[4]] - psum[p[5]] - psum[p[8]] + psum[p[9]] >= cval ? 1 : 0 ) );     // 3\n}\n\n//! @}\n\n} /* namespace cv */\n\n#endif\n"
  },
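The evaluator interface declared in feature.hpp follows a create, init, setImage, operator() flow. A hedged sketch of that flow under stated assumptions (the window size, the single-sample setup, and the sample contents are illustrative; this is internal training machinery, not a published API):

#include "opencv2/tracking/feature.hpp"

// Sketch only: binds one 32x32 CV_8UC1 sample and reads every Haar response.
void dumpHaarResponses(const cv::Mat& patch /* assumed 32x32 CV_8UC1 */)
{
    cv::Ptr<cv::CvFeatureParams> params =
        cv::CvFeatureParams::create(cv::CvFeatureParams::HAAR);
    cv::Ptr<cv::CvFeatureEvaluator> eval =
        cv::CvFeatureEvaluator::create(cv::CvFeatureParams::HAAR);

    eval->init(params.get(), /*_maxSampleCount=*/1, /*_winSize=*/cv::Size(32, 32));
    eval->setImage(patch, /*clsLabel=*/1, /*idx=*/0);

    for (int f = 0; f < eval->getNumFeatures(); f++)
        (void)(*eval)(f, /*sampleIdx=*/0);  // response of feature f on the bound sample
}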
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking/kalman_filters.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2015, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_TRACKING_KALMAN_HPP_\n#define __OPENCV_TRACKING_KALMAN_HPP_\n\n#include \"opencv2/core.hpp\"\n#include <limits>\n\nnamespace cv\n{\nnamespace tracking\n{\n\n/** @brief The interface for Unscented Kalman filter and Augmented Unscented Kalman filter.\n*/\nclass CV_EXPORTS UnscentedKalmanFilter\n{\npublic:\n\n    virtual ~UnscentedKalmanFilter(){}\n\n    /** The function performs prediction step of the algorithm\n    * @param control - the current control vector,\n    * @return the predicted estimate of the state.\n    */\n    virtual Mat predict( const Mat& control = Mat() ) = 0;\n\n    /** The function performs correction step of the algorithm\n    * @param measurement - the current measurement vector,\n    * @return the corrected estimate of the state.\n    */\n    virtual Mat correct( const Mat& measurement ) = 0;\n\n    /**\n    * @return the process noise cross-covariance matrix.\n    */\n    virtual Mat getProcessNoiseCov() const = 0;\n\n    /**\n    * @return the measurement noise cross-covariance matrix.\n    */\n    virtual Mat getMeasurementNoiseCov() const = 0;\n\n    /**\n    * @return the error cross-covariance matrix.\n    */\n    virtual Mat getErrorCov() const = 0;\n\n    /**\n    * @return the current estimate of the state.\n    */\n    virtual Mat getState() const = 0;\n};\n\n/** @brief Model of dynamical system 
for Unscented Kalman filter.\n* The interface for a dynamical system model. It contains functions for computing the next state and the measurement.\n* It must be inherited from in order to use the UKF.\n*/\nclass CV_EXPORTS UkfSystemModel\n{\npublic:\n\n    virtual ~UkfSystemModel(){}\n\n    /** The function for computing the next state from the previous state\n    * @param x_k - previous state vector,\n    * @param u_k - control vector,\n    * @param v_k - noise vector,\n    * @param x_kplus1 - next state vector.\n    */\n    virtual void stateConversionFunction( const Mat& x_k, const Mat& u_k, const Mat& v_k, Mat& x_kplus1 ) = 0;\n    /** The function for computing the measurement from the state\n    * @param x_k - state vector,\n    * @param n_k - noise vector,\n    * @param z_k - measurement vector.\n    */\n    virtual void measurementFunction( const Mat& x_k, const Mat& n_k, Mat& z_k ) = 0;\n};\n\n\n/** @brief Unscented Kalman filter parameters.\n* The class for initialization parameters of the Unscented Kalman filter\n*/\nclass CV_EXPORTS UnscentedKalmanFilterParams\n{\npublic:\n\n     int DP;                                     //!< Dimensionality of the state vector.\n     int MP;                                     //!< Dimensionality of the measurement vector.\n     int CP;                                     //!< Dimensionality of the control vector.\n     int dataType;                               //!< Type of elements of vectors and matrices, default is CV_64F.\n\n     Mat stateInit;                              //!< Initial state, DP x 1, default is zero.\n     Mat errorCovInit;                           //!< State estimate cross-covariance matrix, DP x DP, default is identity.\n\n     Mat processNoiseCov;                        //!< Process noise cross-covariance matrix, DP x DP.\n     Mat measurementNoiseCov;                    //!< Measurement noise cross-covariance matrix, MP x MP.\n\n     // Parameters of algorithm\n     double alpha;                               //!< Default is 1e-3.\n     double k;                                   //!< Default is 0.\n     double beta;                                //!< Default is 2.0.\n\n     //Dynamical system model\n     Ptr<UkfSystemModel> model;                  //!< Object of the class containing functions for computing the next state and the measurement.\n\n    /** The constructors.\n    */\n    UnscentedKalmanFilterParams(){}\n\n    /**\n    * @param dp - dimensionality of the state vector,\n    * @param mp - dimensionality of the measurement vector,\n    * @param cp - dimensionality of the control vector,\n    * @param processNoiseCovDiag - value of the elements on the main diagonal of the process noise cross-covariance matrix,\n    * @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,\n    * @param dynamicalSystem - ptr to object of the class containing functions for computing the next state and the measurement,\n    * @param type - type of the created matrices that should be CV_32F or CV_64F.\n    */\n    UnscentedKalmanFilterParams( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,\n                                Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );\n\n    /** The function for initialization of the Unscented Kalman filter\n    * @param dp - dimensionality of the state vector,\n    * @param mp - dimensionality of the measurement vector,\n    * @param cp - dimensionality of the control vector,\n    * @param processNoiseCovDiag - value of the elements 
on the main diagonal of the process noise cross-covariance matrix,\n    * @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,\n    * @param dynamicalSystem - ptr to object of the class containing functions for computing the next state and the measurement,\n    * @param type - type of the created matrices that should be CV_32F or CV_64F.\n    */\n    void init( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,\n                                Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );\n};\n\n/** @brief Augmented Unscented Kalman filter parameters.\n* The class for initialization parameters of the Augmented Unscented Kalman filter\n*/\nclass CV_EXPORTS AugmentedUnscentedKalmanFilterParams: public UnscentedKalmanFilterParams\n{\npublic:\n\n    AugmentedUnscentedKalmanFilterParams(){}\n\n    /**\n    * @param dp - dimensionality of the state vector,\n    * @param mp - dimensionality of the measurement vector,\n    * @param cp - dimensionality of the control vector,\n    * @param processNoiseCovDiag - value of the elements on the main diagonal of the process noise cross-covariance matrix,\n    * @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,\n    * @param dynamicalSystem - ptr to object of the class containing functions for computing the next state and the measurement,\n    * @param type - type of the created matrices that should be CV_32F or CV_64F.\n    */\n    AugmentedUnscentedKalmanFilterParams( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,\n                                Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );\n\n    /** The function for initialization of the Augmented Unscented Kalman filter\n    * @param dp - dimensionality of the state vector,\n    * @param mp - dimensionality of the measurement vector,\n    * @param cp - dimensionality of the control vector,\n    * @param processNoiseCovDiag - value of the elements on the main diagonal of the process noise cross-covariance matrix,\n    * @param measurementNoiseCovDiag - value of the elements on the main diagonal of the measurement noise cross-covariance matrix,\n    * @param dynamicalSystem - object of the class containing functions for computing the next state and the measurement,\n    * @param type - type of the created matrices that should be CV_32F or CV_64F.\n    */\n    void init( int dp, int mp, int cp, double processNoiseCovDiag, double measurementNoiseCovDiag,\n                                Ptr<UkfSystemModel> dynamicalSystem, int type = CV_64F );\n};\n\n/** @brief Unscented Kalman Filter factory method\n\n* The function creates an implementation of the Unscented Kalman filter <https://en.wikipedia.org/wiki/Kalman_filter#Unscented_Kalman_filter>.\n* @param params - an object of the UnscentedKalmanFilterParams class containing UKF parameters.\n* @return pointer to the object of the UnscentedKalmanFilterImpl class implementing UnscentedKalmanFilter.\n*/\nCV_EXPORTS Ptr<UnscentedKalmanFilter> createUnscentedKalmanFilter( const UnscentedKalmanFilterParams &params );\n/** @brief Augmented Unscented Kalman Filter factory method\n\n* The function creates an implementation of the Augmented Unscented Kalman filter (see http://becs.aalto.fi/en/research/bayes/ekfukf/documentation.pdf, pages 31-33).\n* The AUKF is more accurate than the UKF but is computationally more expensive.\n* @param params - an object of the AugmentedUnscentedKalmanFilterParams class containing AUKF parameters.\n* @return pointer to the object of the 
AugmentedUnscentedKalmanFilterImpl class implementing UnscentedKalmanFilter.\n*/\nCV_EXPORTS Ptr<UnscentedKalmanFilter> createAugmentedUnscentedKalmanFilter( const AugmentedUnscentedKalmanFilterParams &params );\n\n} // tracking\n} // cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking/onlineBoosting.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_ONLINEBOOSTING_HPP__\n#define __OPENCV_ONLINEBOOSTING_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup tracking\n//! 
@{\n\n//TODO based on the original implementation\n//http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml\n\nclass BaseClassifier;\nclass WeakClassifierHaarFeature;\nclass EstimatedGaussDistribution;\nclass ClassifierThreshold;\nclass Detector;\n\nclass StrongClassifierDirectSelection\n{\n public:\n\n  StrongClassifierDirectSelection( int numBaseClf, int numWeakClf, Size patchSz, const Rect& sampleROI, bool useFeatureEx = false, int iterationInit =\n                                       0 );\n  virtual ~StrongClassifierDirectSelection();\n\n  void initBaseClassifier();\n\n  bool update( const Mat& image, int target, float importance = 1.0 );\n  float eval( const Mat& response );\n  std::vector<int> getSelectedWeakClassifier();\n  float classifySmooth( const std::vector<Mat>& images, const Rect& sampleROI, int& idx );\n  int getNumBaseClassifier();\n  Size getPatchSize() const;\n  Rect getROI() const;\n  bool getUseFeatureExchange() const;\n  int getReplacedClassifier() const;\n\n  void replaceWeakClassifier( int idx );\n  int getSwappedClassifier() const;\n private:\n\n  //StrongClassifier\n  int numBaseClassifier;\n  int numAllWeakClassifier;\n  int numWeakClassifier;\n  int iterInit;\n  BaseClassifier** baseClassifier;\n  std::vector<float> alpha;\n  cv::Size patchSize;\n\n  bool useFeatureExchange;\n\n  //StrongClassifierDirectSelection\n  std::vector<bool> m_errorMask;\n  std::vector<float> m_errors;\n  std::vector<float> m_sumErrors;\n\n  Detector* detector;\n  Rect ROI;\n\n  int replacedClassifier;\n  int swappedClassifier;\n};\n\nclass BaseClassifier\n{\n public:\n\n  BaseClassifier( int numWeakClassifier, int iterationInit );\n  BaseClassifier( int numWeakClassifier, int iterationInit, WeakClassifierHaarFeature** weakCls );\n\n  WeakClassifierHaarFeature** getReferenceWeakClassifier()\n  {\n    return weakClassifier;\n  }\n  ;\n  void trainClassifier( const Mat& image, int target, float importance, std::vector<bool>& errorMask );\n  int selectBestClassifier( std::vector<bool>& errorMask, float importance, std::vector<float> & errors );\n  int computeReplaceWeakestClassifier( const std::vector<float> & errors );\n  void replaceClassifierStatistic( int sourceIndex, int targetIndex );\n  int getIdxOfNewWeakClassifier()\n  {\n    return m_idxOfNewWeakClassifier;\n  }\n  ;\n  int eval( const Mat& image );\n  virtual ~BaseClassifier();\n  float getError( int curWeakClassifier );\n  void getErrors( float* errors );\n  int getSelectedClassifier() const;\n  void replaceWeakClassifier( int index );\n\n protected:\n\n  void generateRandomClassifier();\n  WeakClassifierHaarFeature** weakClassifier;\n  bool m_referenceWeakClassifier;\n  int m_numWeakClassifier;\n  int m_selectedClassifier;\n  int m_idxOfNewWeakClassifier;\n  std::vector<float> m_wCorrect;\n  std::vector<float> m_wWrong;\n  int m_iterationInit;\n\n};\n\nclass EstimatedGaussDistribution\n{\n public:\n\n  EstimatedGaussDistribution();\n  EstimatedGaussDistribution( float P_mean, float R_mean, float P_sigma, float R_sigma );\n  virtual ~EstimatedGaussDistribution();\n  void update( float value );  //, float timeConstant = -1.0);\n  float getMean();\n  float getSigma();\n  void setValues( float mean, float sigma );\n\n private:\n\n  float m_mean;\n  float m_sigma;\n  float m_P_mean;\n  float m_P_sigma;\n  float m_R_mean;\n  float m_R_sigma;\n};\n\nclass WeakClassifierHaarFeature\n{\n\n public:\n\n  WeakClassifierHaarFeature();\n  virtual ~WeakClassifierHaarFeature();\n\n  bool update( float value, int target );\n  int eval( 
float value );\n\n private:\n\n  float sigma;\n  float mean;\n  ClassifierThreshold* m_classifier;\n\n  void getInitialDistribution( EstimatedGaussDistribution *distribution );\n  void generateRandomClassifier( EstimatedGaussDistribution* m_posSamples, EstimatedGaussDistribution* m_negSamples );\n\n};\n\nclass Detector\n{\n public:\n\n  Detector( StrongClassifierDirectSelection* classifier );\n  virtual\n  ~Detector( void );\n\n  void\n  classifySmooth( const std::vector<Mat>& image, float minMargin = 0 );\n\n  int\n  getNumDetections();\n  float\n  getConfidence( int patchIdx );\n  float\n  getConfidenceOfDetection( int detectionIdx );\n\n  float getConfidenceOfBestDetection()\n  {\n    return m_maxConfidence;\n  }\n  ;\n  int\n  getPatchIdxOfBestDetection();\n\n  int\n  getPatchIdxOfDetection( int detectionIdx );\n\n  const std::vector<int> &\n  getIdxDetections() const\n  {\n    return m_idxDetections;\n  }\n  ;\n  const std::vector<float> &\n  getConfidences() const\n  {\n    return m_confidences;\n  }\n  ;\n\n  const cv::Mat &\n  getConfImageDisplay() const\n  {\n    return m_confImageDisplay;\n  }\n\n private:\n\n  void\n  prepareConfidencesMemory( int numPatches );\n  void\n  prepareDetectionsMemory( int numDetections );\n\n  StrongClassifierDirectSelection* m_classifier;\n  std::vector<float> m_confidences;\n  int m_sizeConfidences;\n  int m_numDetections;\n  std::vector<int> m_idxDetections;\n  int m_sizeDetections;\n  int m_idxBestDetection;\n  float m_maxConfidence;\n  cv::Mat_<float> m_confMatrix;\n  cv::Mat_<float> m_confMatrixSmooth;\n  cv::Mat_<unsigned char> m_confImageDisplay;\n};\n\nclass ClassifierThreshold\n{\n public:\n\n  ClassifierThreshold( EstimatedGaussDistribution* posSamples, EstimatedGaussDistribution* negSamples );\n  virtual ~ClassifierThreshold();\n\n  void update( float value, int target );\n  int eval( float value );\n\n  void* getDistribution( int target );\n\n private:\n\n  EstimatedGaussDistribution* m_posSamples;\n  EstimatedGaussDistribution* m_negSamples;\n\n  float m_threshold;\n  int m_parity;\n};\n\n//! @}\n\n} /* namespace cv */\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking/onlineMIL.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_ONLINEMIL_HPP__\n#define __OPENCV_ONLINEMIL_HPP__\n\n#include \"opencv2/core.hpp\"\n#include <limits>\n\nnamespace cv\n{\n\n//! @addtogroup tracking\n//! @{\n\n//TODO based on the original implementation\n//http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml\n\n#define  sign(s)  ((s > 0 ) ? 1 : ((s<0) ? 
-1 : 0))\n\nclass ClfOnlineStump;\n\nclass CV_EXPORTS ClfMilBoost\n{\n public:\n  struct CV_EXPORTS Params\n  {\n    Params();\n    int _numSel;\n    int _numFeat;\n    float _lRate;\n  };\n\n  ClfMilBoost();\n  ~ClfMilBoost();\n  void init( const ClfMilBoost::Params &parameters = ClfMilBoost::Params() );\n  void update( const Mat& posx, const Mat& negx );\n  std::vector<float> classify( const Mat& x, bool logR = true );\n\n  inline float sigmoid( float x )\n  {\n    return 1.0f / ( 1.0f + exp( -x ) );\n  }\n\n private:\n  uint _numsamples;\n  ClfMilBoost::Params _myParams;\n  std::vector<int> _selectors;\n  std::vector<ClfOnlineStump*> _weakclf;\n  uint _counter;\n\n};\n\nclass ClfOnlineStump\n{\n public:\n  float _mu0, _mu1, _sig0, _sig1;\n  float _q;\n  int _s;\n  float _log_n1, _log_n0;\n  float _e1, _e0;\n  float _lRate;\n\n  ClfOnlineStump();\n  ClfOnlineStump( int ind );\n  void init();\n  void update( const Mat& posx, const Mat& negx, const cv::Mat_<float> & posw = cv::Mat_<float>(), const cv::Mat_<float> & negw = cv::Mat_<float>() );\n  bool classify( const Mat& x, int i );\n  float classifyF( const Mat& x, int i );\n  std::vector<float> classifySetF( const Mat& x );\n\n private:\n  bool _trained;\n  int _ind;\n\n};\n\n//! @}\n\n} /* namespace cv */\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking/tldDataset.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef OPENCV_TLD_DATASET\n#define OPENCV_TLD_DATASET\n\n#include \"opencv2/highgui.hpp\"\n\nnamespace cv\n{\n\tnamespace tld\n\t{\n\t\tCV_EXPORTS cv::Rect2d tld_InitDataset(int videoInd, const char* rootPath = \"TLD_dataset\", int datasetInd = 0);\n\t\tCV_EXPORTS cv::Mat tld_getNextDatasetFrame();\n\t}\n}\n\n#endif"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking/tracker.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_TRACKER_HPP__\n#define __OPENCV_TRACKER_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc/types_c.h\"\n#include \"feature.hpp\"\n#include \"onlineMIL.hpp\"\n#include \"onlineBoosting.hpp\"\n#include <iostream>\n\n\n#define BOILERPLATE_CODE(name,classname) \\\n    static Ptr<classname> createTracker(const classname::Params &parameters=classname::Params());\\\n    virtual ~classname(){};\n\n/*\n * Partially based on:\n * ====================================================================================================================\n * \t- [AAM] S. Salti, A. Cavallaro, L. Di Stefano, Adaptive Appearance Modeling for Video Tracking: Survey and Evaluation\n *  - [AMVOT] X. Li, W. Hu, C. Shen, Z. Zhang, A. Dick, A. van den Hengel, A Survey of Appearance Models in Visual Object Tracking\n *\n * This Tracking API has been designed with PlantUML. If you modify this API please change UML files under modules/tracking/doc/uml\n *\n */\n\nnamespace cv\n{\n\n//! @addtogroup tracking\n//! 
@{\n\n/************************************ TrackerFeature Base Classes ************************************/\n\n/** @brief Abstract base class for TrackerFeature that represents the feature.\n */\nclass CV_EXPORTS TrackerFeature\n{\n public:\n  virtual ~TrackerFeature();\n\n  /** @brief Compute the features in the images collection\n    @param images The images\n    @param response The output response\n     */\n  void compute( const std::vector<Mat>& images, Mat& response );\n\n  /** @brief Create TrackerFeature by tracker feature type\n    @param trackerFeatureType The TrackerFeature name\n\n    The modes available now:\n\n    -   \"HAAR\" -- Haar Feature-based\n\n    The modes that will be available soon:\n\n    -   \"HOG\" -- Histogram of Oriented Gradients features\n    -   \"LBP\" -- Local Binary Pattern features\n    -   \"FEATURE2D\" -- All types of Feature2D\n     */\n  static Ptr<TrackerFeature> create( const String& trackerFeatureType );\n\n  /** @brief Identify most effective features\n    @param response Collection of response for the specific TrackerFeature\n    @param npoints Max number of features\n\n    @note This method modifies the response parameter\n     */\n  virtual void selection( Mat& response, int npoints ) = 0;\n\n  /** @brief Get the name of the specific TrackerFeature\n     */\n  String getClassName() const;\n\n protected:\n\n  virtual bool computeImpl( const std::vector<Mat>& images, Mat& response ) = 0;\n\n  String className;\n};\n\n/** @brief Class that manages the extraction and selection of features\n\n@cite AAM Feature Extraction and Feature Set Refinement (Feature Processing and Feature Selection).\nSee table I and section III C @cite AMVOT Appearance modelling -\\> Visual representation (Table II,\nsection 3.1 - 3.2)\n\nTrackerFeatureSet is an aggregation of TrackerFeature\n\n@sa\n   TrackerFeature\n\n */\nclass CV_EXPORTS TrackerFeatureSet\n{\n public:\n\n  TrackerFeatureSet();\n\n  ~TrackerFeatureSet();\n\n  /** @brief Extract features from the images collection\n    @param images The input images\n     */\n  void extraction( const std::vector<Mat>& images );\n\n  /** @brief Identify most effective features for all feature types (optional)\n     */\n  void selection();\n\n  /** @brief Remove outliers for all feature types (optional)\n     */\n  void removeOutliers();\n\n  /** @brief Add TrackerFeature in the collection. 
Return true if TrackerFeature is added, false otherwise\n    @param trackerFeatureType The TrackerFeature name\n\n    The modes available now:\n\n    -   \"HAAR\" -- Haar Feature-based\n\n    The modes that will be available soon:\n\n    -   \"HOG\" -- Histogram of Oriented Gradients features\n    -   \"LBP\" -- Local Binary Pattern features\n    -   \"FEATURE2D\" -- All types of Feature2D\n\n    Example TrackerFeatureSet::addTrackerFeature :\n    @code\n        //sample usage:\n\n        Ptr<TrackerFeature> trackerFeature = new TrackerFeatureHAAR( HAARparameters );\n        featureSet->addTrackerFeature( trackerFeature );\n\n        //or add CSC sampler with default parameters\n        //featureSet->addTrackerFeature( \"HAAR\" );\n    @endcode\n    @note If you use the second method, you must initialize the TrackerFeature\n     */\n  bool addTrackerFeature( String trackerFeatureType );\n\n  /** @overload\n    @param feature The TrackerFeature class\n    */\n  bool addTrackerFeature( Ptr<TrackerFeature>& feature );\n\n  /** @brief Get the TrackerFeature collection (TrackerFeature name, TrackerFeature pointer)\n     */\n  const std::vector<std::pair<String, Ptr<TrackerFeature> > >& getTrackerFeature() const;\n\n  /** @brief Get the responses\n\n    @note Be sure to call extraction before getResponses\n     */\n  const std::vector<Mat>& getResponses() const;\n\n private:\n\n  void clearResponses();\n  bool blockAddTrackerFeature;\n\n  std::vector<std::pair<String, Ptr<TrackerFeature> > > features;  //list of features\n  std::vector<Mat> responses;\t\t\t\t//list of responses after compute\n\n};\n\n/************************************ TrackerSampler Base Classes ************************************/\n\n/** @brief Abstract base class for TrackerSamplerAlgorithm that represents the algorithm for the specific\nsampler.\n */\nclass CV_EXPORTS TrackerSamplerAlgorithm\n{\n public:\n  /**\n   * \\brief Destructor\n   */\n  virtual ~TrackerSamplerAlgorithm();\n\n  /** @brief Create TrackerSamplerAlgorithm by tracker sampler type.\n    @param trackerSamplerType The trackerSamplerType name\n\n    The modes available now:\n\n    -   \"CSC\" -- Current State Center\n    -   \"CS\" -- Current State\n     */\n  static Ptr<TrackerSamplerAlgorithm> create( const String& trackerSamplerType );\n\n  /** @brief Computes the regions starting from a position in an image.\n\n    Return true if samples are computed, false otherwise\n\n    @param image The current frame\n    @param boundingBox The bounding box from which regions can be calculated\n\n    @param sample The computed samples @cite AAM Fig. 1 variable Sk\n     */\n  bool sampling( const Mat& image, Rect boundingBox, std::vector<Mat>& sample );\n\n  /** @brief Get the name of the specific TrackerSamplerAlgorithm\n    */\n  String getClassName() const;\n\n protected:\n  String className;\n\n  virtual bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample ) = 0;\n};\n\n/** @brief Class that manages the sampler in order to select regions for updating the model of the tracker\n\n@cite AAM Sampling and Labeling. 
See table I and section III B\n\nTrackerSampler is an aggregation of TrackerSamplerAlgorithm\n@sa\n   TrackerSamplerAlgorithm\n */\nclass CV_EXPORTS TrackerSampler\n{\n public:\n\n  /**\n   * \\brief Constructor\n   */\n  TrackerSampler();\n\n  /**\n   * \\brief Destructor\n   */\n  ~TrackerSampler();\n\n  /** @brief Computes the regions starting from a position in an image\n    @param image The current frame\n    @param boundingBox The bounding box from which regions can be calculated\n     */\n  void sampling( const Mat& image, Rect boundingBox );\n\n  /** @brief Return the collection of the TrackerSamplerAlgorithm\n    */\n  const std::vector<std::pair<String, Ptr<TrackerSamplerAlgorithm> > >& getSamplers() const;\n\n  /** @brief Return the samples from all TrackerSamplerAlgorithm, @cite AAM Fig. 1 variable Sk\n    */\n  const std::vector<Mat>& getSamples() const;\n\n  /** @brief Add TrackerSamplerAlgorithm in the collection. Return true if sampler is added, false otherwise\n    @param trackerSamplerAlgorithmType The TrackerSamplerAlgorithm name\n\n    The modes available now:\n    -   \"CSC\" -- Current State Center\n    -   \"CS\" -- Current State\n    -   \"PF\" -- Particle Filtering\n\n    Example TrackerSamplerAlgorithm::addTrackerSamplerAlgorithm :\n    @code\n         TrackerSamplerCSC::Params CSCparameters;\n         Ptr<TrackerSamplerAlgorithm> CSCSampler = new TrackerSamplerCSC( CSCparameters );\n\n         if( !sampler->addTrackerSamplerAlgorithm( CSCSampler ) )\n           return false;\n\n         //or add CSC sampler with default parameters\n         //sampler->addTrackerSamplerAlgorithm( \"CSC\" );\n    @endcode\n    @note If you use the second method, you must initialize the TrackerSamplerAlgorithm\n     */\n  bool addTrackerSamplerAlgorithm( String trackerSamplerAlgorithmType );\n\n  /** @overload\n    @param sampler The TrackerSamplerAlgorithm\n    */\n  bool addTrackerSamplerAlgorithm( Ptr<TrackerSamplerAlgorithm>& sampler );\n\n private:\n  std::vector<std::pair<String, Ptr<TrackerSamplerAlgorithm> > > samplers;\n  std::vector<Mat> samples;\n  bool blockAddTrackerSampler;\n\n  void clearSamples();\n};\n\n/************************************ TrackerModel Base Classes ************************************/\n\n/** @brief Abstract base class for TrackerTargetState that represents a possible state of the target.\n\nSee @cite AAM \\f$\\hat{x}^{i}_{k}\\f$, all the state candidates.\n\nInherit this class for your target state; in your own implementation you can add scale variation,\nwidth, height, orientation, etc.\n */\nclass CV_EXPORTS TrackerTargetState\n{\n public:\n  virtual ~TrackerTargetState()\n  {\n  }\n  ;\n  /**\n   * \\brief Get the position\n   * \\return The position\n   */\n  Point2f getTargetPosition() const;\n\n  /**\n   * \\brief Set the position\n   * \\param position The position\n   */\n  void setTargetPosition( const Point2f& position );\n  /**\n   * \\brief Get the width of the target\n   * \\return The width of the target\n   */\n  int getTargetWidth() const;\n\n  /**\n   * \\brief Set the width of the target\n   * \\param width The width of the target\n   */\n  void setTargetWidth( int width );\n  /**\n   * \\brief Get the height of the target\n   * \\return The height of the target\n   */\n  int getTargetHeight() const;\n\n  /**\n   * \\brief Set the height of the target\n   * \\param height The height of the target\n   */\n  void setTargetHeight( int height );\n\n protected:\n  Point2f targetPosition;\n  int targetWidth;\n  int 
targetHeight;\n\n};\n\n/** @brief Represents the model of the target at frame \\f$k\\f$ (all states and scores)\n\nSee @cite AAM The set of the pair \\f$\\langle \\hat{x}^{i}_{k}, C^{i}_{k} \\rangle\\f$\n@sa TrackerTargetState\n */\ntypedef std::vector<std::pair<Ptr<TrackerTargetState>, float> > ConfidenceMap;\n\n/** @brief Represents the estimate states for all frames\n\n@cite AAM \\f$x_{k}\\f$ is the trajectory of the target up to time \\f$k\\f$\n\n@sa TrackerTargetState\n */\ntypedef std::vector<Ptr<TrackerTargetState> > Trajectory;\n\n/** @brief Abstract base class for TrackerStateEstimator that estimates the most likely target state.\n\nSee @cite AAM State estimator\n\nSee @cite AMVOT Statistical modeling (Fig. 3), Table III (generative) - IV (discriminative) - V (hybrid)\n */\nclass CV_EXPORTS TrackerStateEstimator\n{\n public:\n  virtual ~TrackerStateEstimator();\n\n  /** @brief Estimate the most likely target state, return the estimated state\n    @param confidenceMaps The overall appearance model as a list of :cConfidenceMap\n     */\n  Ptr<TrackerTargetState> estimate( const std::vector<ConfidenceMap>& confidenceMaps );\n\n  /** @brief Update the ConfidenceMap with the scores\n    @param confidenceMaps The overall appearance model as a list of :cConfidenceMap\n     */\n  void update( std::vector<ConfidenceMap>& confidenceMaps );\n\n  /** @brief Create TrackerStateEstimator by tracker state estimator type\n    @param trackeStateEstimatorType The TrackerStateEstimator name\n\n    The modes available now:\n\n    -   \"BOOSTING\" -- Boosting-based discriminative appearance models. See @cite AMVOT section 4.4\n\n    The modes available soon:\n\n    -   \"SVM\" -- SVM-based discriminative appearance models. See @cite AMVOT section 4.5\n     */\n  static Ptr<TrackerStateEstimator> create( const String& trackeStateEstimatorType );\n\n  /** @brief Get the name of the specific TrackerStateEstimator\n     */\n  String getClassName() const;\n\n protected:\n\n  virtual Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps ) = 0;\n  virtual void updateImpl( std::vector<ConfidenceMap>& confidenceMaps ) = 0;\n  String className;\n};\n\n/** @brief Abstract class that represents the model of the target. 
It must be instantiated by a specialized\ntracker\n\nSee @cite AAM Ak\n\nInherit this with your TrackerModel\n */\nclass CV_EXPORTS TrackerModel\n{\n public:\n\n  /**\n   * \\brief Constructor\n   */\n  TrackerModel();\n\n  /**\n   * \\brief Destructor\n   */\n  virtual ~TrackerModel();\n\n  /** @brief Set TrackerEstimator, return true if the tracker state estimator is added, false otherwise\n    @param trackerStateEstimator The TrackerStateEstimator\n    @note You can add only one TrackerStateEstimator\n     */\n  bool setTrackerStateEstimator( Ptr<TrackerStateEstimator> trackerStateEstimator );\n\n  /** @brief Estimate the most likely target location\n\n    @cite AAM ME, Model Estimation table I\n    @param responses Features extracted from TrackerFeatureSet\n     */\n  void modelEstimation( const std::vector<Mat>& responses );\n\n  /** @brief Update the model\n\n    @cite AAM MU, Model Update table I\n     */\n  void modelUpdate();\n\n  /** @brief Run the TrackerStateEstimator, return true if it is possible to estimate a new state, false otherwise\n    */\n  bool runStateEstimator();\n\n  /** @brief Set the current TrackerTargetState in the Trajectory\n    @param lastTargetState The current TrackerTargetState\n     */\n  void setLastTargetState( const Ptr<TrackerTargetState>& lastTargetState );\n\n  /** @brief Get the last TrackerTargetState from Trajectory\n    */\n  Ptr<TrackerTargetState> getLastTargetState() const;\n\n  /** @brief Get the list of the ConfidenceMap\n    */\n  const std::vector<ConfidenceMap>& getConfidenceMaps() const;\n\n  /** @brief Get the last ConfidenceMap for the current frame\n     */\n  const ConfidenceMap& getLastConfidenceMap() const;\n\n  /** @brief Get the TrackerStateEstimator\n    */\n  Ptr<TrackerStateEstimator> getTrackerStateEstimator() const;\n\n private:\n\n  void clearCurrentConfidenceMap();\n\n protected:\n  std::vector<ConfidenceMap> confidenceMaps;\n  Ptr<TrackerStateEstimator> stateEstimator;\n  ConfidenceMap currentConfidenceMap;\n  Trajectory trajectory;\n  int maxCMLength;\n\n  virtual void modelEstimationImpl( const std::vector<Mat>& responses ) = 0;\n  virtual void modelUpdateImpl() = 0;\n\n};\n\n/************************************ Tracker Base Class ************************************/\n\n/** @brief Base abstract class for the long-term tracker\n */\nclass CV_EXPORTS_W Tracker : public virtual Algorithm\n{\n public:\n\n  virtual ~Tracker();\n\n  /** @brief Initialize the tracker with a known bounding box surrounding the target\n    @param image The initial frame\n    @param boundingBox The initial bounding box\n\n    @return True if initialization succeeded, false otherwise\n     */\n  CV_WRAP bool init( const Mat& image, const Rect2d& boundingBox );\n\n  /** @brief Update the tracker, find the new most likely bounding box for the target\n    @param image The current frame\n    @param boundingBox The bounding box that represents the new target location if true was returned; not\n    modified otherwise\n\n    @return True means that the target was located and false means that the tracker cannot locate the target in\n    the current frame. 
Note that the latter *does not* imply that the tracker has failed; the target may simply be\n    missing from the frame (say, out of sight)\n     */\n  CV_WRAP bool update( const Mat& image, CV_OUT Rect2d& boundingBox );\n\n  /** @brief Creates a tracker by its name.\n    @param trackerType Tracker type\n\n    The following tracker types are supported:\n\n    -   \"MIL\" -- TrackerMIL\n    -   \"BOOSTING\" -- TrackerBoosting\n     */\n  CV_WRAP static Ptr<Tracker> create( const String& trackerType );\n\n  virtual void read( const FileNode& fn )=0;\n  virtual void write( FileStorage& fs ) const=0;\n\n  Ptr<TrackerModel> getModel()\n  {\n\t  return model;\n  }\n\n protected:\n\n  virtual bool initImpl( const Mat& image, const Rect2d& boundingBox ) = 0;\n  virtual bool updateImpl( const Mat& image, Rect2d& boundingBox ) = 0;\n\n  bool isInit;\n\n  Ptr<TrackerFeatureSet> featureSet;\n  Ptr<TrackerSampler> sampler;\n  Ptr<TrackerModel> model;\n};\n\n\n/************************************ Specific TrackerStateEstimator Classes ************************************/\n\n/** @brief TrackerStateEstimator based on Boosting\n    */\nclass CV_EXPORTS TrackerStateEstimatorMILBoosting : public TrackerStateEstimator\n{\n public:\n\n  /**\n   * Implementation of the target state for TrackerStateEstimatorMILBoosting\n   */\n  class TrackerMILTargetState : public TrackerTargetState\n  {\n\n   public:\n    /**\n     * \\brief Constructor\n     * \\param position Top left corner of the bounding box\n     * \\param width Width of the bounding box\n     * \\param height Height of the bounding box\n     * \\param foreground label for target or background\n     * \\param features features extracted\n     */\n    TrackerMILTargetState( const Point2f& position, int width, int height, bool foreground, const Mat& features );\n\n    /**\n     * \\brief Destructor\n     */\n    ~TrackerMILTargetState()\n    {\n    }\n    ;\n\n    /** @brief Set label: true for target foreground, false for background\n    @param foreground Label for background/foreground\n     */\n    void setTargetFg( bool foreground );\n    /** @brief Set the features extracted from TrackerFeatureSet\n    @param features The features extracted\n     */\n    void setFeatures( const Mat& features );\n    /** @brief Get the label. 
Return true for target foreground, false for background\n     */\n    bool isTargetFg() const;\n    /** @brief Get the features extracted\n     */\n    Mat getFeatures() const;\n\n   private:\n    bool isTarget;\n    Mat targetFeatures;\n  };\n\n  /** @brief Constructor\n    @param nFeatures Number of features for each sample\n     */\n  TrackerStateEstimatorMILBoosting( int nFeatures = 250 );\n  ~TrackerStateEstimatorMILBoosting();\n\n  /** @brief Set the current confidenceMap\n    @param confidenceMap The current :cConfidenceMap\n     */\n  void setCurrentConfidenceMap( ConfidenceMap& confidenceMap );\n\n protected:\n  Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps );\n  void updateImpl( std::vector<ConfidenceMap>& confidenceMaps );\n\n private:\n  uint max_idx( const std::vector<float> &v );\n  void prepareData( const ConfidenceMap& confidenceMap, Mat& positive, Mat& negative );\n\n  ClfMilBoost boostMILModel;\n  bool trained;\n  int numFeatures;\n\n  ConfidenceMap currentConfidenceMap;\n};\n\n/** @brief TrackerStateEstimatorAdaBoosting based on ADA-Boosting\n */\nclass CV_EXPORTS TrackerStateEstimatorAdaBoosting : public TrackerStateEstimator\n{\n public:\n  /** @brief Implementation of the target state for TrackerAdaBoostingTargetState\n    */\n  class TrackerAdaBoostingTargetState : public TrackerTargetState\n  {\n\n   public:\n    /**\n     * \\brief Constructor\n     * \\param position Top left corner of the bounding box\n     * \\param width Width of the bounding box\n     * \\param height Height of the bounding box\n     * \\param foreground label for target or background\n     * \\param responses list of features\n     */\n    TrackerAdaBoostingTargetState( const Point2f& position, int width, int height, bool foreground, const Mat& responses );\n\n    /**\n     * \\brief Destructor\n     */\n    ~TrackerAdaBoostingTargetState()\n    {\n    }\n    ;\n\n    /** @brief Set the features extracted from TrackerFeatureSet\n    @param responses The features extracted\n     */\n    void setTargetResponses( const Mat& responses );\n    /** @brief Set label: true for target foreground, false for background\n    @param foreground Label for background/foreground\n     */\n    void setTargetFg( bool foreground );\n    /** @brief Get the features extracted\n     */\n    Mat getTargetResponses() const;\n    /** @brief Get the label. 
Return true for target foreground, false for background\n    */\n    bool isTargetFg() const;\n\n   private:\n    bool isTarget;\n    Mat targetResponses;\n\n  };\n\n  /** @brief Constructor\n    @param numClassifer Number of base classifiers\n    @param initIterations Number of iterations in the initialization\n    @param nFeatures Number of features/weak classifiers\n    @param patchSize tracking rect\n    @param ROI initial ROI\n     */\n  TrackerStateEstimatorAdaBoosting( int numClassifer, int initIterations, int nFeatures, Size patchSize, const Rect& ROI );\n\n  /**\n   * \\brief Destructor\n   */\n  ~TrackerStateEstimatorAdaBoosting();\n\n  /** @brief Get the sampling ROI\n     */\n  Rect getSampleROI() const;\n\n  /** @brief Set the sampling ROI\n    @param ROI the sampling ROI\n     */\n  void setSampleROI( const Rect& ROI );\n\n  /** @brief Set the current confidenceMap\n    @param confidenceMap The current :cConfidenceMap\n     */\n  void setCurrentConfidenceMap( ConfidenceMap& confidenceMap );\n\n  /** @brief Get the list of the selected weak classifiers for the classification step\n     */\n  std::vector<int> computeSelectedWeakClassifier();\n\n  /** @brief Get the list of the weak classifiers that should be replaced\n     */\n  std::vector<int> computeReplacedClassifier();\n\n  /** @brief Get the list of the weak classifiers that replace those to be replaced\n     */\n  std::vector<int> computeSwappedClassifier();\n\n protected:\n  Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps );\n  void updateImpl( std::vector<ConfidenceMap>& confidenceMaps );\n\n  Ptr<StrongClassifierDirectSelection> boostClassifier;\n\n private:\n  int numBaseClassifier;\n  int iterationInit;\n  int numFeatures;\n  bool trained;\n  Size initPatchSize;\n  Rect sampleROI;\n  std::vector<int> replacedClassifier;\n  std::vector<int> swappedClassifier;\n\n  ConfidenceMap currentConfidenceMap;\n};\n\n/**\n * \\brief TrackerStateEstimator based on SVM\n */\nclass CV_EXPORTS TrackerStateEstimatorSVM : public TrackerStateEstimator\n{\n public:\n  TrackerStateEstimatorSVM();\n  ~TrackerStateEstimatorSVM();\n\n protected:\n  Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps );\n  void updateImpl( std::vector<ConfidenceMap>& confidenceMaps );\n};\n\n/************************************ Specific TrackerSamplerAlgorithm Classes ************************************/\n\n/** @brief TrackerSampler based on CSC (current state centered), used by MIL algorithm TrackerMIL\n */\nclass CV_EXPORTS TrackerSamplerCSC : public TrackerSamplerAlgorithm\n{\n public:\n  enum\n  {\n    MODE_INIT_POS = 1,  //!< mode for init positive samples\n    MODE_INIT_NEG = 2,  //!< mode for init negative samples\n    MODE_TRACK_POS = 3,  //!< mode for update positive samples\n    MODE_TRACK_NEG = 4,  //!< mode for update negative samples\n    MODE_DETECT = 5   //!< mode for detect samples\n  };\n\n  struct CV_EXPORTS Params\n  {\n    Params();\n    float initInRad;        //!< radius for gathering positive instances during init\n    float trackInPosRad;    //!< radius for gathering positive instances during tracking\n    float searchWinSize;\t//!< size of search window\n    int initMaxNegNum;      //!< # negative samples to use during init\n    int trackMaxPosNum;     //!< # positive samples to use during training\n    int trackMaxNegNum;     //!< # negative samples to use during training\n  };\n\n  /** @brief Constructor\n    @param parameters TrackerSamplerCSC parameters 
TrackerSamplerCSC::Params\n     */\n  TrackerSamplerCSC( const TrackerSamplerCSC::Params &parameters = TrackerSamplerCSC::Params() );\n\n  /** @brief Set the sampling mode of TrackerSamplerCSC\n    @param samplingMode The sampling mode\n\n    The modes are:\n\n    -   \"MODE_INIT_POS = 1\" -- for the positive sampling in initialization step\n    -   \"MODE_INIT_NEG = 2\" -- for the negative sampling in initialization step\n    -   \"MODE_TRACK_POS = 3\" -- for the positive sampling in update step\n    -   \"MODE_TRACK_NEG = 4\" -- for the negative sampling in update step\n    -   \"MODE_DETECT = 5\" -- for the sampling in detection step\n     */\n  void setMode( int samplingMode );\n\n  ~TrackerSamplerCSC();\n\n protected:\n\n  bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample );\n\n private:\n\n  Params params;\n  int mode;\n  RNG rng;\n\n  std::vector<Mat> sampleImage( const Mat& img, int x, int y, int w, int h, float inrad, float outrad = 0, int maxnum = 1000000 );\n};\n\n/** @brief TrackerSampler based on CS (current state), used by the TrackerBoosting algorithm\n */\nclass CV_EXPORTS TrackerSamplerCS : public TrackerSamplerAlgorithm\n{\n public:\n  enum\n  {\n    MODE_POSITIVE = 1,  //!< mode for positive samples\n    MODE_NEGATIVE = 2,  //!< mode for negative samples\n    MODE_CLASSIFY = 3  //!< mode for classifying samples\n  };\n\n  struct CV_EXPORTS Params\n  {\n    Params();\n    float overlap;  //!<overlap for the search windows\n    float searchFactor;  //!<search region parameter\n  };\n  /** @brief Constructor\n    @param parameters TrackerSamplerCS parameters TrackerSamplerCS::Params\n     */\n  TrackerSamplerCS( const TrackerSamplerCS::Params &parameters = TrackerSamplerCS::Params() );\n\n  /** @brief Set the sampling mode of TrackerSamplerCS\n    @param samplingMode The sampling mode\n\n    The modes are:\n\n    -   \"MODE_POSITIVE = 1\" -- for the positive sampling\n    -   \"MODE_NEGATIVE = 2\" -- for the negative sampling\n    -   \"MODE_CLASSIFY = 3\" -- for the sampling in classification step\n     */\n  void setMode( int samplingMode );\n\n  ~TrackerSamplerCS();\n\n  bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample );\n  Rect getROI() const;\n private:\n  Rect getTrackingROI( float searchFactor );\n  Rect RectMultiply( const Rect & rect, float f );\n  std::vector<Mat> patchesRegularScan( const Mat& image, Rect trackingROI, Size patchSize );\n  void setCheckedROI( Rect imageROI );\n\n  Params params;\n  int mode;\n  Rect trackedPatch;\n  Rect validROI;\n  Rect ROI;\n\n};\n\n/** @brief This sampler is based on particle filtering.\n\nIn principle, it can be thought of as performing some sort of optimization (and indeed, this\ntracker uses opencv's optim module), where the tracker seeks to find the rectangle in the given frame\nwhich is the most *\"similar\"* to the initial rectangle (the one given through the constructor).\n\nThe optimization performed is stochastic and somewhat resembles genetic algorithms, where on each new\nimage received (submitted via TrackerSamplerPF::sampling()) we start with the region bounded by\nboundingBox, then generate several \"perturbed\" boxes and take the ones most similar to the original.\nThis selection round is repeated several times. 
At the end, we hope that only the most promising boxes\nremain, and these are combined to produce the subrectangle of the image, which is put as the sole\nelement of the array sample.\n\nIt should be noted that the definition of \"similarity\" between two rectangles is based on comparing\ntheir histograms. As experiments show, the tracker is *not* very successful if the target is assumed to\nchange its dimensions strongly.\n */\nclass CV_EXPORTS TrackerSamplerPF : public TrackerSamplerAlgorithm\n{\npublic:\n  /** @brief This structure contains all the parameters that can be varied during the course of the sampling\n    algorithm. Below is the structure exposed, together with its members briefly explained with\n    reference to the above discussion of the algorithm's working.\n */\n  struct CV_EXPORTS Params\n  {\n    Params();\n    int iterationNum; //!< number of selection rounds\n    int particlesNum; //!< number of \"perturbed\" boxes on each round\n    double alpha; //!< with each new round we exponentially decrease the amount of \"perturbing\" we allow (like in simulated annealing)\n                  //!< and this very alpha controls how fast annealing happens, i.e. how fast perturbing decreases\n    Mat_<double> std; //!< initial values for perturbing (1-by-4 array, as each rectangle is given by 4 values -- coordinates of opposite vertices,\n                      //!< hence we have 4 values to perturb)\n  };\n  /** @brief Constructor\n    @param chosenRect Initial rectangle that is supposed to contain the target we'd like to track.\n    @param parameters\n     */\n  TrackerSamplerPF(const Mat& chosenRect,const TrackerSamplerPF::Params &parameters = TrackerSamplerPF::Params());\nprotected:\n  bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample );\nprivate:\n  Params params;\n  Ptr<MinProblemSolver> _solver;\n  Ptr<MinProblemSolver::Function> _function;\n};\n\n/************************************ Specific TrackerFeature Classes ************************************/\n\n/**\n * \\brief TrackerFeature based on Feature2D\n */\nclass CV_EXPORTS TrackerFeatureFeature2d : public TrackerFeature\n{\n public:\n\n  /**\n   * \\brief Constructor\n   * \\param detectorType string of FeatureDetector\n   * \\param descriptorType string of DescriptorExtractor\n   */\n  TrackerFeatureFeature2d( String detectorType, String descriptorType );\n\n  ~TrackerFeatureFeature2d();\n\n  void selection( Mat& response, int npoints );\n\n protected:\n\n  bool computeImpl( const std::vector<Mat>& images, Mat& response );\n\n private:\n\n  std::vector<KeyPoint> keypoints;\n};\n\n/**\n * \\brief TrackerFeature based on HOG\n */\nclass CV_EXPORTS TrackerFeatureHOG : public TrackerFeature\n{\n public:\n\n  TrackerFeatureHOG();\n\n  ~TrackerFeatureHOG();\n\n  void selection( Mat& response, int npoints );\n\n protected:\n\n  bool computeImpl( const std::vector<Mat>& images, Mat& response );\n\n};\n\n/** @brief TrackerFeature based on HAAR features, used by TrackerMIL and many other algorithms\n@note HAAR features implementation is copied from apps/traincascade and modified according to MIL\n */\nclass CV_EXPORTS TrackerFeatureHAAR : public TrackerFeature\n{\n public:\n  struct CV_EXPORTS Params\n  {\n    Params();\n    int numFeatures;  //!< # of rects\n    Size rectSize;    //!< rect size\n    bool isIntegral;  //!< true if input images are integral, false otherwise\n  };\n\n  /** @brief Constructor\n    @param parameters TrackerFeatureHAAR parameters TrackerFeatureHAAR::Params\n     */\n  TrackerFeatureHAAR( const 
TrackerFeatureHAAR::Params &parameters = TrackerFeatureHAAR::Params() );\n\n  ~TrackerFeatureHAAR();\n\n  /** @brief Compute the features only for the selected indices in the images collection\n    @param selFeatures indices of selected features\n    @param images The images\n    @param response Collection of response for the specific TrackerFeature\n     */\n  bool extractSelected( const std::vector<int> selFeatures, const std::vector<Mat>& images, Mat& response );\n\n  /** @brief Identify most effective features\n    @param response Collection of response for the specific TrackerFeature\n    @param npoints Max number of features\n\n    @note This method modifies the response parameter\n     */\n  void selection( Mat& response, int npoints );\n\n  /** @brief Swap the feature in position source with the feature in position target\n  @param source The source position\n  @param target The target position\n */\n  bool swapFeature( int source, int target );\n\n  /** @brief   Swap the feature in position id with the feature input\n  @param id The position\n  @param feature The feature\n */\n  bool swapFeature( int id, CvHaarEvaluator::FeatureHaar& feature );\n\n  /** @brief Get the feature in position id\n    @param id The position\n     */\n  CvHaarEvaluator::FeatureHaar& getFeatureAt( int id );\n\n protected:\n  bool computeImpl( const std::vector<Mat>& images, Mat& response );\n\n private:\n\n  Params params;\n  Ptr<CvHaarEvaluator> featureEvaluator;\n};\n\n/**\n * \\brief TrackerFeature based on LBP\n */\nclass CV_EXPORTS TrackerFeatureLBP : public TrackerFeature\n{\n public:\n\n  TrackerFeatureLBP();\n\n  ~TrackerFeatureLBP();\n\n  void selection( Mat& response, int npoints );\n\n protected:\n\n  bool computeImpl( const std::vector<Mat>& images, Mat& response );\n\n};\n\n/************************************ Specific Tracker Classes ************************************/\n\n/** @brief The MIL algorithm trains a classifier in an online manner to separate the object from the\nbackground.\n\nMultiple Instance Learning avoids the drift problem for robust tracking. The implementation is\nbased on @cite MIL .\n\nOriginal code can be found here <http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml>\n */\nclass CV_EXPORTS TrackerMIL : public Tracker\n{\n public:\n  struct CV_EXPORTS Params\n  {\n    Params();\n    //parameters for sampler\n    float samplerInitInRadius;\t//!< radius for gathering positive instances during init\n    int samplerInitMaxNegNum;  //!< # negative samples to use during init\n    float samplerSearchWinSize;  //!< size of search window\n    float samplerTrackInRadius;  //!< radius for gathering positive instances during tracking\n    int samplerTrackMaxPosNum;\t//!< # positive samples to use during tracking\n    int samplerTrackMaxNegNum;\t//!< # negative samples to use during tracking\n    int featureSetNumFeatures;  //!< # features\n\n    void read( const FileNode& fn );\n    void write( FileStorage& fs ) const;\n  };\n\n  /** @brief Constructor\n    @param parameters MIL parameters TrackerMIL::Params\n     */\n  BOILERPLATE_CODE(\"MIL\",TrackerMIL);\n};\n\n/** @brief This is a real-time object tracker based on a novel on-line version of the AdaBoost algorithm.\n\nThe classifier uses the surrounding background as negative examples in the update step to avoid the\ndrifting problem. 
The implementation is based on @cite OLB .\n */\nclass CV_EXPORTS TrackerBoosting : public Tracker\n{\n public:\n  struct CV_EXPORTS Params\n  {\n    Params();\n    int numClassifiers;  //!<the number of classifiers to use in an OnlineBoosting algorithm\n    float samplerOverlap;  //!<search region parameters to use in an OnlineBoosting algorithm\n    float samplerSearchFactor;  //!< search region parameters to use in an OnlineBoosting algorithm\n    int iterationInit;  //!<the initial iterations\n    int featureSetNumFeatures;  //!< # features\n    /**\n     * \\brief Read parameters from file\n     */\n    void read( const FileNode& fn );\n\n    /**\n     * \\brief Write parameters to a file\n     */\n    void write( FileStorage& fs ) const;\n  };\n\n  /** @brief Constructor\n    @param parameters BOOSTING parameters TrackerBoosting::Params\n     */\n  BOILERPLATE_CODE(\"BOOSTING\",TrackerBoosting);\n};\n\n/** @brief Median Flow tracker implementation.\n\nImplementation of the paper @cite MedianFlow .\n\nThe tracker is suitable for very smooth and predictable movements when the object is visible throughout\nthe whole sequence. It's quite accurate for this type of problem (in particular, it was shown\nby the authors to outperform MIL). During the implementation period the code at\n<http://www.aonsquared.co.uk/node/5>, courtesy of the author Arthur Amarra, was used for\nreference purposes.\n */\nclass CV_EXPORTS TrackerMedianFlow : public Tracker\n{\n public:\n  struct CV_EXPORTS Params\n  {\n    Params();\n    int pointsInGrid; //!<square root of the number of keypoints used; increase it to trade\n                      //!<accuracy for speed; the default value is sensible and recommended\n    void read( const FileNode& /*fn*/ );\n    void write( FileStorage& /*fs*/ ) const;\n  };\n\n  /** @brief Constructor\n    @param parameters Median Flow parameters TrackerMedianFlow::Params\n    */\n  BOILERPLATE_CODE(\"MEDIANFLOW\",TrackerMedianFlow);\n};\n\n/** @brief TLD is a novel tracking framework that explicitly decomposes the long-term tracking task into\ntracking, learning and detection.\n\nThe tracker follows the object from frame to frame. The detector localizes all appearances that\nhave been observed so far and corrects the tracker if necessary. The learning estimates the detector’s\nerrors and updates it to avoid these errors in the future. The implementation is based on @cite TLD .\n\nThe Median Flow algorithm (see cv::TrackerMedianFlow) was chosen as a tracking component in this\nimplementation, following the authors. The tracker is supposed to be able to handle rapid motions, partial\nocclusions, object absence etc.\n */\nclass CV_EXPORTS TrackerTLD : public Tracker\n{\n public:\n  struct CV_EXPORTS Params\n  {\n    Params();\n    void read( const FileNode& /*fn*/ );\n    void write( FileStorage& /*fs*/ ) const;\n  };\n\n  /** @brief Constructor\n    @param parameters TLD parameters TrackerTLD::Params\n     */\n  BOILERPLATE_CODE(\"TLD\",TrackerTLD);\n};\n\n/** @brief KCF is a novel tracking framework that utilizes properties of circulant matrices to enhance the processing speed.\n * This tracking method is an implementation of @cite KCF_ECCV which is extended to KCF with color-names features (@cite KCF_CN).\n * The original paper of KCF is available at <http://home.isr.uc.pt/~henriques/circulant/index.html>\n * as well as the matlab implementation. 
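For illustration, a minimal creation sketch (a sketch only: it assumes just the Params defaults declared below, the MODE flags, and the createTracker() factory generated by BOILERPLATE_CODE; the chosen flag values are illustrative):\n * @code\n *     TrackerKCF::Params params;              // defaults from Params()\n *     params.desc_npca = TrackerKCF::GRAY;    // raw grayscale feature, kept uncompressed\n *     params.desc_pca  = TrackerKCF::CN;      // color-names feature, compressed via PCA\n *     params.compress_feature = true;         // activate the PCA compression step\n *     Ptr<TrackerKCF> tracker = TrackerKCF::createTracker( params );\n * @endcode\n * 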
For more information about KCF with color-names features, please refer to\n * <http://www.cvl.isy.liu.se/research/objrec/visualtracking/colvistrack/index.html>.\n */\nclass CV_EXPORTS TrackerKCF : public Tracker\n{\npublic:\n\t/**\n\t* \\brief Feature type to be used in the tracking: grayscale, color-names, compressed color-names.\n\t* The modes available now:\n\t-   \"GRAY\" -- Use grayscale values as the feature\n\t-   \"CN\" -- Color-names feature\n\t-   \"CUSTOM\" -- Use a custom feature extractor set through setFeatureExtractor\n\t*/\n\tenum MODE {\n\t\tGRAY = (1u << 0),\n\t\tCN = (1u << 1),\n\t\tCUSTOM = (1u << 2)\n\t};\n\n\tstruct CV_EXPORTS Params\n\t{\n\t\t/**\n\t\t* \\brief Constructor\n\t\t*/\n\t\tParams();\n\n\t\t/**\n\t\t* \\brief Read parameters from file, currently unused\n\t\t*/\n\t\tvoid read(const FileNode& /*fn*/);\n\n\t\t/**\n\t\t* \\brief Write parameters to file, currently unused\n\t\t*/\n\t\tvoid write(FileStorage& /*fs*/) const;\n\n\t\tdouble sigma;                 //!<  gaussian kernel bandwidth\n\t\tdouble lambda;                //!<  regularization\n\t\tdouble interp_factor;         //!<  linear interpolation factor for adaptation\n\t\tdouble output_sigma_factor;   //!<  spatial bandwidth (proportional to target)\n\t\tdouble pca_learning_rate;     //!<  compression learning rate\n\t\tbool resize;                  //!<  activate the resize feature to improve the processing speed\n\t\tbool split_coeff;             //!<  split the training coefficients into two matrices\n\t\tbool wrap_kernel;             //!<  wrap around the kernel values\n\t\tbool compress_feature;        //!<  activate the pca method to compress the features\n\t\tint max_patch_size;           //!<  threshold for the ROI size\n\t\tint compressed_size;          //!<  feature size after compression\n\t\tunsigned int desc_pca;        //!<  compressed descriptors of TrackerKCF::MODE\n\t\tunsigned int desc_npca;       //!<  non-compressed descriptors of TrackerKCF::MODE\n\t};\n\n\tvirtual void setFeatureExtractor(void(*)(const Mat, const Rect, Mat&), bool pca_func = false);\n\n\t/** @brief Constructor\n\t@param parameters KCF parameters TrackerKCF::Params\n\t*/\n\tBOILERPLATE_CODE(\"KCF\", TrackerKCF);\n};\n\n/************************************ MultiTracker Class ---By Laksono Kurnianggoro--- ************************************/\n/** @brief This class is used to track multiple objects using the specified tracker algorithm.\n* The MultiTracker is a naive implementation of multiple object tracking.\n* It processes the tracked objects independently, without any optimization across the tracked objects.\n*/\nclass CV_EXPORTS_W MultiTracker\n{\npublic:\n\n\t/**\n\t* \\brief Constructor.\n\t* If trackerType is given, it will be set as the default algorithm for all trackers.\n\t* @param trackerType the name of the tracker algorithm to be used\n\t*/\n\tCV_WRAP MultiTracker(const String& trackerType = \"\");\n\n\t/**\n\t* \\brief Destructor\n\t*/\n\t~MultiTracker();\n\n\t/**\n\t* \\brief Add a new object to be tracked.\n\t* The defaultAlgorithm will be used for the newly added tracker.\n\t* @param image input image\n\t* @param boundingBox a rectangle representing the ROI of the tracked object\n\t*/\n\tCV_WRAP bool add(const Mat& image, const Rect2d& boundingBox);\n\n\t/**\n\t* \\brief Add a new object to be tracked.\n\t* @param trackerType the name of the tracker algorithm to be used\n\t* @param image input image\n\t* @param boundingBox a rectangle representing the ROI of the tracked object\n\t*/\n\tCV_WRAP bool add(const String& trackerType, const Mat& image, const Rect2d& 
boundingBox);\n\n\t/**\n\t* \\brief Add a set of objects to be tracked.\n\t* @param trackerType the name of the tracker algorithm to be used\n\t* @param image input image\n\t* @param boundingBox list of the tracked objects\n\t*/\n\tCV_WRAP bool add(const String& trackerType, const Mat& image, std::vector<Rect2d> boundingBox);\n\n\t/**\n\t* \\brief Add a set of objects to be tracked using the defaultAlgorithm tracker.\n\t* @param image input image\n\t* @param boundingBox list of the tracked objects\n\t*/\n\tCV_WRAP bool add(const Mat& image, std::vector<Rect2d> boundingBox);\n\n\t/**\n\t* \\brief Update the current tracking status.\n\t* The result will be saved in the internal storage.\n\t* @param image input image\n\t*/\n\tbool update(const Mat& image);\n\n\t//!<  storage for the tracked objects, each object corresponds to one tracker algorithm.\n\tstd::vector<Rect2d> objects;\n\n\t/**\n\t* \\brief Update the current tracking status.\n\t* @param image input image\n\t* @param boundingBox the tracking result, representing a list of ROIs of the tracked objects.\n\t*/\n\tCV_WRAP bool update(const Mat& image, CV_OUT std::vector<Rect2d> & boundingBox);\n\nprotected:\n\t//!<  storage for the tracker algorithms.\n\tstd::vector< Ptr<Tracker> > trackerList;\n\n\t//!<  default algorithm for the tracking method.\n\tString defaultAlgorithm;\n};\n\nclass ROISelector {\npublic:\n\tRect2d select(Mat img, bool fromCenter = true);\n\tRect2d select(const cv::String& windowName, Mat img, bool showCrossair = true, bool fromCenter = true);\n\tvoid select(const cv::String& windowName, Mat img, std::vector<Rect2d> & boundingBox, bool fromCenter = true);\n\n\tstruct handlerT{\n\t\t// basic parameters\n\t\tbool isDrawing;\n\t\tRect2d box;\n\t\tMat image;\n\n\t\t// parameters for drawing from the center\n\t\tbool drawFromCenter;\n\t\tPoint2f center;\n\n\t\t// initializer list\n\t\thandlerT() : isDrawing(false), drawFromCenter(true) {};\n\t}selectorParams;\n\n\t// to store the tracked objects\n\tstd::vector<handlerT> objects;\n\nprivate:\n\tstatic void mouseHandler(int event, int x, int y, int flags, void *param);\n\tvoid opencv_mouse_callback(int event, int x, int y, int, void *param);\n\n\t// save the pressed character\n\tint key;\n};\n\nRect2d CV_EXPORTS_W selectROI(Mat img, bool fromCenter = true);\nRect2d CV_EXPORTS_W selectROI(const cv::String& windowName, Mat img, bool showCrossair = true, bool fromCenter = true);\nvoid CV_EXPORTS_W selectROI(const cv::String& windowName, Mat img, std::vector<Rect2d> & boundingBox, bool fromCenter = true);\n\n\n/************************************ Multi-Tracker Classes ---By Tyan Vladimir---************************************/\n\n/** @brief Base abstract class for the long-term Multi Object Trackers:\n\n@sa Tracker, MultiTrackerTLD\n*/\nclass CV_EXPORTS MultiTracker_Alt\n{\npublic:\n\t/** @brief Constructor for MultiTracker_Alt\n\t*/\n\tMultiTracker_Alt()\n\t{\n\t\ttargetNum = 0;\n\t}\n\n\t/** @brief Add a new target to the tracking-list and initialize the tracker with a known bounding box surrounding the target\n\t@param image The initial frame\n\t@param boundingBox The initial bounding box of the target\n\t@param tracker_algorithm_name Multi-tracker algorithm name\n\n\t@return True if the new target initialization went successfully, false otherwise\n\t*/\n\tbool addTarget(const Mat& image, const Rect2d& boundingBox, String tracker_algorithm_name);\n\n\t/** @brief Update all trackers from the tracking-list, find the new most likely bounding boxes for the targets\n\t@param image The 
current frame\n\n\t@return True means that all targets were located; false means that the tracker couldn't locate one of the targets in the\n\tcurrent frame. Note that the latter *does not* imply that the tracker has failed; maybe the target is indeed\n\tmissing from the frame (say, out of sight)\n\t*/\n\tbool update(const Mat& image);\n\n\t/** @brief Current number of targets in the tracking-list\n\t*/\n\tint targetNum;\n\n\t/** @brief Trackers list for Multi-Object-Tracker\n\t*/\n\tstd::vector <Ptr<Tracker> > trackers;\n\n\t/** @brief Bounding Boxes list for Multi-Object-Tracker\n\t*/\n\tstd::vector <Rect2d> boundingBoxes;\n\t/** @brief List of randomly generated colors for bounding boxes display\n\t*/\n\tstd::vector<Scalar> colors;\n};\n\n/** @brief Multi Object Tracker for TLD. TLD is a novel tracking framework that explicitly decomposes\nthe long-term tracking task into tracking, learning and detection.\n\nThe tracker follows the object from frame to frame. The detector localizes all appearances that\nhave been observed so far and corrects the tracker if necessary. The learning estimates the detector’s\nerrors and updates it to avoid these errors in the future. The implementation is based on @cite TLD .\n\nThe Median Flow algorithm (see cv::TrackerMedianFlow) was chosen as a tracking component in this\nimplementation, following the authors. The tracker is supposed to be able to handle rapid motions, partial\nocclusions, object absence etc.\n\n@sa Tracker, MultiTracker, TrackerTLD\n*/\nclass CV_EXPORTS MultiTrackerTLD : public MultiTracker_Alt\n{\npublic:\n\t/** @brief Update all trackers from the tracking-list, find the new most likely bounding boxes for the targets by\n\tan optimized update method, using some techniques to speed up calculations specifically for MO TLD. The only limitation\n\tis that all target bounding boxes should have approximately the same aspect ratio. The speed boost is around 20%.\n\n\t@param image The current frame.\n\n\t@return True means that all targets were located; false means that the tracker couldn't locate one of the targets in the\n\tcurrent frame. Note that the latter *does not* imply that the tracker has failed; maybe the target is indeed\n\tmissing from the frame (say, out of sight)\n\t*/\n\tbool update_opt(const Mat& image);\n};\n\n//! @}\n\n} /* namespace cv */\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking/tracking.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/tracking.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/tracking.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n //\n //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n //\n //  By downloading, copying, installing or using the software you agree to this license.\n //  If you do not agree to this license, do not download, install,\n //  copy or use the software.\n //\n //\n //                           License Agreement\n //                For Open Source Computer Vision Library\n //\n // Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n // Third party copyrights are property of their respective owners.\n //\n // Redistribution and use in source and binary forms, with or without modification,\n // are permitted provided that the following conditions are met:\n //\n //   * Redistribution's of source code must retain the above copyright notice,\n //     this list of conditions and the following disclaimer.\n //\n //   * Redistribution's in binary form must reproduce the above copyright notice,\n //     this list of conditions and the following disclaimer in the documentation\n //     and/or other materials provided with the distribution.\n //\n //   * The name of the copyright holders may not be used to endorse or promote products\n //     derived from this software without specific prior written permission.\n //\n // This software is provided by the copyright holders and contributors \"as is\" and\n // any express or implied warranties, including, but not limited to, the implied\n // warranties of merchantability and fitness for a particular purpose are disclaimed.\n // In no event shall the Intel Corporation or contributors be liable for any direct,\n // indirect, incidental, special, exemplary, or consequential damages\n // (including, but not limited to, procurement of substitute goods or services;\n // loss of use, data, or profits; or business interruption) however caused\n // and on any theory of liability, whether in contract, strict liability,\n // or tort (including negligence or otherwise) arising in any way out of\n // the use of this software, even if advised of the possibility of such damage.\n //\n //M*/\n\n#ifndef __OPENCV_TRACKING_LENLEN_HPP__\n#define __OPENCV_TRACKING_LENLEN_HPP__\n\n#include \"opencv2/core/cvdef.h\"\n\n/** @defgroup tracking Tracking API\n\nLong-term optical tracking API\n------------------------------\n\nLong-term optical tracking is one of most important issue for many computer vision applications in\nreal world scenario. The development in this area is very fragmented and this API is an unique\ninterface useful for plug several algorithms and compare them. This work is partially based on\n@cite AAM and @cite AMVOT .\n\nThis algorithms start from a bounding box of the target and with their internal representation they\navoid the drift during the tracking. These long-term trackers are able to evaluate online the\nquality of the location of the target in the new frame, without ground truth.\n\nThere are three main components: the TrackerSampler, the TrackerFeatureSet and the TrackerModel. The\nfirst component is the object that computes the patches over the frame based on the last target\nlocation. The TrackerFeatureSet is the class that manages the Features, is possible plug many kind\nof these (HAAR, HOG, LBP, Feature2D, etc). The last component is the internal representation of the\ntarget, it is the appearence model. It stores all state candidates and compute the trajectory (the\nmost likely target states). 
The class TrackerTargetState represents a possible state of the target.\nThe TrackerSampler and the TrackerFeatureSet are the visual representation of the target, while\nthe TrackerModel is the statistical model.\n\nA recent benchmark between these algorithms can be found in @cite OOT\n\nUML design: see @ref tracking_diagrams\n\nTo see how the API works, try the tracker demo:\n<https://github.com/lenlen/opencv/blob/tracking_api/samples/cpp/tracker.cpp>\n\n@note This Tracking API has been designed with PlantUML. If you modify this API please change the UML\nin <em>modules/tracking/doc/tracking_diagrams.markdown</em>. The following reference was used in the API\n\nCreating Own Tracker\n--------------------\n\nIf you want to create a new tracker, here's what you have to do. First, decide on the name of the class\nfor the tracker (to meet the existing style, we suggest something with prefix \"tracker\", e.g.\ntrackerMIL, trackerBoosting) -- we shall refer to this choice as \"classname\" in what follows. Also,\nyou should decide upon the name of the tracker, as it will be known to the user (the current style\nsuggests using all capitals, say MIL or BOOSTING) -- we'll call it a \"name\".\n\n-   Declare your tracker in include/opencv2/tracking/tracker.hpp. Your tracker should inherit from\n    Tracker (please, see the example below). You should declare the specialized Params structure,\n    where you will probably want to put the data needed to initialize your tracker. Also don't\n    forget to put the BOILERPLATE_CODE(name,classname) macro inside the class declaration. That\n    macro will generate the static createTracker() function, which we'll talk about later. You should\n    get something similar to :\n@code\n        class CV_EXPORTS_W TrackerMIL : public Tracker\n        {\n         public:\n          struct CV_EXPORTS Params\n          {\n            Params();\n            //parameters for sampler\n            float samplerInitInRadius;  // radius for gathering positive instances during init\n            int samplerInitMaxNegNum;  // # negative samples to use during init\n            float samplerSearchWinSize;  // size of search window\n            float samplerTrackInRadius;  // radius for gathering positive instances during tracking\n            int samplerTrackMaxPosNum;  // # positive samples to use during tracking\n            int samplerTrackMaxNegNum;  // # negative samples to use during tracking\n            int featureSetNumFeatures;  // #features\n\n            void read( const FileNode& fn );\n            void write( FileStorage& fs ) const;\n          };\n@endcode\n    of course, you can also add any additional methods of your choice. 
It should be pointed out,\n    however, that it is not expected to have a constructor declared, as creation should be done via\n    the corresponding createTracker() method.\n-   In the src/tracker.cpp file, add a BOILERPLATE_CODE(name,classname) line to the body of\n    the Tracker::create() method you will find there, like :\n@code\n        Ptr<Tracker> Tracker::create( const String& trackerType )\n        {\n          BOILERPLATE_CODE(\"BOOSTING\",TrackerBoosting);\n          BOILERPLATE_CODE(\"MIL\",TrackerMIL);\n          return Ptr<Tracker>();\n        }\n@endcode\n-   Finally, you should implement the function with signature :\n@code\n        Ptr<classname> classname::createTracker(const classname::Params &parameters){\n            ...\n        }\n@endcode\n    That function can (and probably will) return a pointer to some derived class of \"classname\",\n    which will probably have a real constructor.\n\nEvery tracker has three components: TrackerSampler, TrackerFeatureSet and TrackerModel. The first two\nare instantiated by the Tracker base class, while the last component is abstract, so you must\nimplement your own TrackerModel.\n\n### TrackerSampler\n\nTrackerSampler is already instantiated, but you should define the sampling algorithm and add the\nclasses (or single class) to TrackerSampler. You can choose one of the ready implementations, such as\nTrackerSamplerCSC, or you can implement your own sampling method; in this case the class must inherit\nTrackerSamplerAlgorithm. Fill in the samplingImpl method so that it writes the result into the \"sample\" output\nargument.\n\nExample of creating specialized TrackerSamplerAlgorithm TrackerSamplerCSC : :\n@code\n    class CV_EXPORTS_W TrackerSamplerCSC : public TrackerSamplerAlgorithm\n    {\n     public:\n      TrackerSamplerCSC( const TrackerSamplerCSC::Params &parameters = TrackerSamplerCSC::Params() );\n      ~TrackerSamplerCSC();\n      ...\n\n     protected:\n      bool samplingImpl( const Mat& image, Rect boundingBox, std::vector<Mat>& sample );\n      ...\n\n    };\n@endcode\n\nExample of adding TrackerSamplerAlgorithm to TrackerSampler : :\n@code\n    //sampler is the TrackerSampler\n    Ptr<TrackerSamplerAlgorithm> CSCSampler = new TrackerSamplerCSC( CSCparameters );\n    if( !sampler->addTrackerSamplerAlgorithm( CSCSampler ) )\n     return false;\n\n    //or add CSC sampler with default parameters\n    //sampler->addTrackerSamplerAlgorithm( \"CSC\" );\n@endcode\n@sa\n   TrackerSamplerCSC, TrackerSamplerAlgorithm\n\n### TrackerFeatureSet\n\nTrackerFeatureSet is already instantiated (as the first component), but you should define what kinds of features\nyou'll use in your tracker. 
You can use multiple feature types, so you can add a ready\nimplementation such as TrackerFeatureHAAR to your TrackerFeatureSet, or develop your own implementation.\nIn this case, in the computeImpl method put the code that extracts the features, and in the selection\nmethod optionally put the code for the refinement and selection of the features.\n\nExample of creating specialized TrackerFeature TrackerFeatureHAAR : :\n@code\n    class CV_EXPORTS_W TrackerFeatureHAAR : public TrackerFeature\n    {\n     public:\n      TrackerFeatureHAAR( const TrackerFeatureHAAR::Params &parameters = TrackerFeatureHAAR::Params() );\n      ~TrackerFeatureHAAR();\n      void selection( Mat& response, int npoints );\n      ...\n\n     protected:\n      bool computeImpl( const std::vector<Mat>& images, Mat& response );\n      ...\n\n    };\n@endcode\nExample of adding TrackerFeature to TrackerFeatureSet : :\n@code\n    //featureSet is the TrackerFeatureSet\n    Ptr<TrackerFeature> trackerFeature = new TrackerFeatureHAAR( HAARparameters );\n    featureSet->addTrackerFeature( trackerFeature );\n@endcode\n@sa\n   TrackerFeatureHAAR, TrackerFeatureSet\n\n### TrackerModel\n\nTrackerModel is abstract, so in your implementation you must develop your own TrackerModel that inherits\nfrom TrackerModel. Fill in the method for the estimation of the state, \"modelEstimationImpl\", which\nestimates the most likely target location; see @cite AAM table I (ME) for further information. Fill in\n\"modelUpdateImpl\" in order to update the model; see @cite AAM table I (MU). In this class you can use\nthe ConfidenceMap and Trajectory to store the model. The first represents the model over all\npossible candidate states and the second represents the list of all estimated states.\n\nExample of creating specialized TrackerModel TrackerMILModel : :\n@code\n    class TrackerMILModel : public TrackerModel\n    {\n     public:\n      TrackerMILModel( const Rect& boundingBox );\n      ~TrackerMILModel();\n      ...\n\n     protected:\n      void modelEstimationImpl( const std::vector<Mat>& responses );\n      void modelUpdateImpl();\n      ...\n\n    };\n@endcode\nAnd add it in your Tracker : :\n@code\n    bool TrackerMIL::initImpl( const Mat& image, const Rect2d& boundingBox )\n    {\n      ...\n      //model is the general TrackerModel field of the general Tracker\n      model = new TrackerMILModel( boundingBox );\n      ...\n    }\n@endcode\nIn the last step you should define the TrackerStateEstimator based on your implementation, or you can\nuse one of the ready classes, such as TrackerStateEstimatorMILBoosting. 
It represents the statistical part of the\nmodel, which estimates the most likely target state.\n\nExample of creating specialized TrackerStateEstimator TrackerStateEstimatorMILBoosting : :\n@code\n    class CV_EXPORTS_W TrackerStateEstimatorMILBoosting : public TrackerStateEstimator\n    {\n     class TrackerMILTargetState : public TrackerTargetState\n     {\n     ...\n     };\n\n     public:\n      TrackerStateEstimatorMILBoosting( int nFeatures = 250 );\n      ~TrackerStateEstimatorMILBoosting();\n      ...\n\n     protected:\n      Ptr<TrackerTargetState> estimateImpl( const std::vector<ConfidenceMap>& confidenceMaps );\n      void updateImpl( std::vector<ConfidenceMap>& confidenceMaps );\n      ...\n\n    };\n@endcode\nAnd add it to your TrackerModel : :\n@code\n    //model is the TrackerModel of your Tracker\n    Ptr<TrackerStateEstimatorMILBoosting> stateEstimator = new TrackerStateEstimatorMILBoosting( params.featureSetNumFeatures );\n    model->setTrackerStateEstimator( stateEstimator );\n@endcode\n@sa\n   TrackerModel, TrackerStateEstimatorMILBoosting, TrackerTargetState\n\nDuring this step, you should define your TrackerTargetState based on your implementation.\nThe TrackerTargetState base class has only the bounding box (upper-left position, width and height); you\ncan enrich it by adding scale factor, target rotation, etc.\n\nExample of creating specialized TrackerTargetState TrackerMILTargetState : :\n@code\n    class TrackerMILTargetState : public TrackerTargetState\n    {\n     public:\n      TrackerMILTargetState( const Point2f& position, int targetWidth, int targetHeight, bool foreground, const Mat& features );\n      ~TrackerMILTargetState();\n      ...\n\n     private:\n      bool isTarget;\n      Mat targetFeatures;\n      ...\n\n    };\n@endcode\n### Try it\n\nTo try your tracker you can use the demo at\n<https://github.com/lenlen/opencv/blob/tracking_api/samples/cpp/tracker.cpp>.\n\nThe first argument is the name of the tracker and the second is a video source.\n\n*/\n\n#include <opencv2/tracking/tracker.hpp>\n#include <opencv2/tracking/tldDataset.hpp>\n\n#endif //__OPENCV_TRACKING_LENLEN_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/video/background_segm.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_BACKGROUND_SEGM_HPP__\n#define __OPENCV_BACKGROUND_SEGM_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup video_motion\n//! @{\n\n/** @brief Base class for background/foreground segmentation. :\n\nThe class is only used to define the common interface for the whole family of background/foreground\nsegmentation algorithms.\n */\nclass CV_EXPORTS_W BackgroundSubtractor : public Algorithm\n{\npublic:\n    /** @brief Computes a foreground mask.\n\n    @param image Next video frame.\n    @param fgmask The output foreground mask as an 8-bit binary image.\n    @param learningRate The value between 0 and 1 that indicates how fast the background model is\n    learnt. Negative parameter value makes the algorithm to use some automatically chosen learning\n    rate. 
0 means that the background model is not updated at all, 1 means that the background model\n    is completely reinitialized from the last frame.\n     */\n    CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) = 0;\n\n    /** @brief Computes a background image.\n\n    @param backgroundImage The output background image.\n\n    @note Sometimes the background image can be very blurry, as it contains the average background\n    statistics.\n     */\n    CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const = 0;\n};\n\n\n/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.\n\nThe class implements the Gaussian mixture model background subtraction described in @cite Zivkovic2004\nand @cite Zivkovic2006 .\n */\nclass CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor\n{\npublic:\n    /** @brief Returns the number of last frames that affect the background model\n    */\n    CV_WRAP virtual int getHistory() const = 0;\n    /** @brief Sets the number of last frames that affect the background model\n    */\n    CV_WRAP virtual void setHistory(int history) = 0;\n\n    /** @brief Returns the number of gaussian components in the background model\n    */\n    CV_WRAP virtual int getNMixtures() const = 0;\n    /** @brief Sets the number of gaussian components in the background model.\n\n    The model needs to be reinitialized to reserve memory.\n    */\n    CV_WRAP virtual void setNMixtures(int nmixtures) = 0;//needs reinitialization!\n\n    /** @brief Returns the \"background ratio\" parameter of the algorithm\n\n    If a foreground pixel keeps semi-constant value for about backgroundRatio\\*history frames, it's\n    considered background and added to the model as a center of a new component. It corresponds to the TB\n    parameter in the paper.\n     */\n    CV_WRAP virtual double getBackgroundRatio() const = 0;\n    /** @brief Sets the \"background ratio\" parameter of the algorithm\n    */\n    CV_WRAP virtual void setBackgroundRatio(double ratio) = 0;\n\n    /** @brief Returns the variance threshold for the pixel-model match\n\n    The main threshold on the squared Mahalanobis distance to decide if the sample is well described by\n    the background model or not. Related to Cthr from the paper.\n     */\n    CV_WRAP virtual double getVarThreshold() const = 0;\n    /** @brief Sets the variance threshold for the pixel-model match\n    */\n    CV_WRAP virtual void setVarThreshold(double varThreshold) = 0;\n\n    /** @brief Returns the variance threshold for the pixel-model match used for new mixture component generation\n\n    Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the\n    existing components (corresponds to Tg in the paper). If a pixel is not close to any component, it\n    is considered foreground or added as a new component. 3 sigma =\\> Tg=3\\*3=9 is the default. A smaller Tg\n    value generates more components. 
A higher Tg value may result in a small number of components but\n    they can grow too large.\n     */\n    CV_WRAP virtual double getVarThresholdGen() const = 0;\n    /** @brief Sets the variance threshold for the pixel-model match used for new mixture component generation\n    */\n    CV_WRAP virtual void setVarThresholdGen(double varThresholdGen) = 0;\n\n    /** @brief Returns the initial variance of each gaussian component\n    */\n    CV_WRAP virtual double getVarInit() const = 0;\n    /** @brief Sets the initial variance of each gaussian component\n    */\n    CV_WRAP virtual void setVarInit(double varInit) = 0;\n\n    CV_WRAP virtual double getVarMin() const = 0;\n    CV_WRAP virtual void setVarMin(double varMin) = 0;\n\n    CV_WRAP virtual double getVarMax() const = 0;\n    CV_WRAP virtual void setVarMax(double varMax) = 0;\n\n    /** @brief Returns the complexity reduction threshold\n\n    This parameter defines the number of samples needed to accept that a component actually exists. CT=0.05\n    is a default value for all the samples. By setting CT=0 you get an algorithm very similar to the\n    standard Stauffer&Grimson algorithm.\n     */\n    CV_WRAP virtual double getComplexityReductionThreshold() const = 0;\n    /** @brief Sets the complexity reduction threshold\n    */\n    CV_WRAP virtual void setComplexityReductionThreshold(double ct) = 0;\n\n    /** @brief Returns the shadow detection flag\n\n    If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorMOG2 for\n    details.\n     */\n    CV_WRAP virtual bool getDetectShadows() const = 0;\n    /** @brief Enables or disables shadow detection\n    */\n    CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;\n\n    /** @brief Returns the shadow value\n\n    Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0\n    in the mask always means background, 255 means foreground.\n     */\n    CV_WRAP virtual int getShadowValue() const = 0;\n    /** @brief Sets the shadow value\n    */\n    CV_WRAP virtual void setShadowValue(int value) = 0;\n\n    /** @brief Returns the shadow threshold\n\n    A shadow is detected if a pixel is a darker version of the background. The shadow threshold (Tau in\n    the paper) is a threshold defining how much darker the shadow can be. Tau = 0.5 means that if a pixel\n    is more than twice darker than the background, then it is not a shadow. See Prati, Mikic, Trivedi and Cucchiara,\n    *Detecting Moving Shadows...*, IEEE PAMI, 2003.\n     */\n    CV_WRAP virtual double getShadowThreshold() const = 0;\n    /** @brief Sets the shadow threshold\n    */\n    CV_WRAP virtual void setShadowThreshold(double threshold) = 0;\n};\n\n/** @brief Creates MOG2 Background Subtractor\n\n@param history Length of the history.\n@param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model\nto decide whether a pixel is well described by the background model. This parameter does not\naffect the background update.\n@param detectShadows If true, the algorithm will detect shadows and mark them. 
It decreases the\nspeed a bit, so if you do not need this feature, set the parameter to false.\n */\nCV_EXPORTS_W Ptr<BackgroundSubtractorMOG2>\n    createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,\n                                   bool detectShadows=true);\n\n/** @brief K-nearest neighbours based Background/Foreground Segmentation Algorithm.\n\nThe class implements the K-nearest neighbours background subtraction described in @cite Zivkovic2006 .\nIt is very efficient if the number of foreground pixels is low.\n */\nclass CV_EXPORTS_W BackgroundSubtractorKNN : public BackgroundSubtractor\n{\npublic:\n    /** @brief Returns the number of last frames that affect the background model\n    */\n    CV_WRAP virtual int getHistory() const = 0;\n    /** @brief Sets the number of last frames that affect the background model\n    */\n    CV_WRAP virtual void setHistory(int history) = 0;\n\n    /** @brief Returns the number of data samples in the background model\n    */\n    CV_WRAP virtual int getNSamples() const = 0;\n    /** @brief Sets the number of data samples in the background model.\n\n    The model needs to be reinitialized to reserve memory.\n    */\n    CV_WRAP virtual void setNSamples(int _nN) = 0;//needs reinitialization!\n\n    /** @brief Returns the threshold on the squared distance between the pixel and the sample\n\n    The threshold on the squared distance between the pixel and the sample to decide whether a pixel is\n    close to a data sample.\n     */\n    CV_WRAP virtual double getDist2Threshold() const = 0;\n    /** @brief Sets the threshold on the squared distance\n    */\n    CV_WRAP virtual void setDist2Threshold(double _dist2Threshold) = 0;\n\n    /** @brief Returns the number of neighbours, the k in the kNN.\n\n    K is the number of samples that need to be within dist2Threshold in order to decide that the\n    pixel matches the kNN background model.\n     */\n    CV_WRAP virtual int getkNNSamples() const = 0;\n    /** @brief Sets the k in the kNN. How many nearest neighbours need to match.\n    */\n    CV_WRAP virtual void setkNNSamples(int _nkNN) = 0;\n\n    /** @brief Returns the shadow detection flag\n\n    If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorKNN for\n    details.\n     */\n    CV_WRAP virtual bool getDetectShadows() const = 0;\n    /** @brief Enables or disables shadow detection\n    */\n    CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;\n\n    /** @brief Returns the shadow value\n\n    Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0\n    in the mask always means background, 255 means foreground.\n     */\n    CV_WRAP virtual int getShadowValue() const = 0;\n    /** @brief Sets the shadow value\n    */\n    CV_WRAP virtual void setShadowValue(int value) = 0;\n\n    /** @brief Returns the shadow threshold\n\n    A shadow is detected if a pixel is a darker version of the background. The shadow threshold (Tau in\n    the paper) is a threshold defining how much darker the shadow can be. Tau = 0.5 means that if a pixel\n    is more than twice darker than the background, then it is not a shadow. 
See Prati, Mikic, Trivedi and Cucchiara,\n    *Detecting Moving Shadows...*, IEEE PAMI, 2003.\n     */\n    CV_WRAP virtual double getShadowThreshold() const = 0;\n    /** @brief Sets the shadow threshold\n     */\n    CV_WRAP virtual void setShadowThreshold(double threshold) = 0;\n};\n\n/** @brief Creates KNN Background Subtractor\n\n@param history Length of the history.\n@param dist2Threshold Threshold on the squared distance between the pixel and the sample to decide\nwhether a pixel is close to that sample. This parameter does not affect the background update.\n@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the\nspeed a bit, so if you do not need this feature, set the parameter to false.\n */\nCV_EXPORTS_W Ptr<BackgroundSubtractorKNN>\n    createBackgroundSubtractorKNN(int history=500, double dist2Threshold=400.0,\n                                   bool detectShadows=true);\n\n//! @} video_motion\n\n} // cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/video/tracking.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_TRACKING_HPP__\n#define __OPENCV_TRACKING_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n\nnamespace cv\n{\n\n//! @addtogroup video_track\n//! @{\n\nenum { OPTFLOW_USE_INITIAL_FLOW     = 4,\n       OPTFLOW_LK_GET_MIN_EIGENVALS = 8,\n       OPTFLOW_FARNEBACK_GAUSSIAN   = 256\n     };\n\n/** @brief Finds an object center, size, and orientation.\n\n@param probImage Back projection of the object histogram. See calcBackProject.\n@param window Initial search window.\n@param criteria Stop criteria for the underlying meanShift.\nreturns\n(in old interfaces) Number of iterations CAMSHIFT took to converge\nThe function implements the CAMSHIFT object tracking algorithm @cite Bradski98 . First, it finds an\nobject center using meanShift and then adjusts the window size and finds the optimal rotation. The\nfunction returns the rotated rectangle structure that includes the object position, size, and\norientation. 
The next position of the search window can be obtained with RotatedRect::boundingRect().\n\nSee the OpenCV sample camshiftdemo.c that tracks colored objects.\n\n@note\n-   (Python) A sample explaining the camshift tracking algorithm can be found at\n    opencv_source_code/samples/python/camshift.py\n */\nCV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,\n                                   TermCriteria criteria );\n\n/** @brief Finds an object on a back projection image.\n\n@param probImage Back projection of the object histogram. See calcBackProject for details.\n@param window Initial search window.\n@param criteria Stop criteria for the iterative search algorithm.\nreturns\n:   Number of iterations meanShift took to converge.\nThe function implements the iterative object search algorithm. It takes the input back projection of\nan object and the initial position. The mass center in the window of the back projection image is\ncomputed and the search window center shifts to the mass center. The procedure is repeated until the\nspecified number of iterations criteria.maxCount is reached or until the window center shifts by less\nthan criteria.epsilon. The algorithm is used inside CamShift and, unlike CamShift , the search\nwindow size or orientation does not change during the search. You can simply pass the output of\ncalcBackProject to this function. But better results can be obtained if you pre-filter the back\nprojection and remove the noise. For example, you can do this by retrieving connected components\nwith findContours , throwing away contours with small area ( contourArea ), and rendering the\nremaining contours with drawContours.\n\n@note\n-   A mean-shift tracking sample can be found at opencv_source_code/samples/cpp/camshiftdemo.cpp\n */\nCV_EXPORTS_W int meanShift( InputArray probImage, CV_IN_OUT Rect& window, TermCriteria criteria );\n\n/** @brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.\n\n@param img 8-bit input image.\n@param pyramid output pyramid.\n@param winSize window size of optical flow algorithm. Must not be less than the winSize argument of\ncalcOpticalFlowPyrLK. It is needed to calculate the required padding for pyramid levels.\n@param maxLevel 0-based maximal pyramid level number.\n@param withDerivatives set to precompute gradients for every pyramid level. If the pyramid is\nconstructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.\n@param pyrBorder the border mode for pyramid layers.\n@param derivBorder the border mode for gradients.\n@param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false\nto force data copying.\n@return number of levels in constructed pyramid. 
Can be less than maxLevel.\n */\nCV_EXPORTS_W int buildOpticalFlowPyramid( InputArray img, OutputArrayOfArrays pyramid,\n                                          Size winSize, int maxLevel, bool withDerivatives = true,\n                                          int pyrBorder = BORDER_REFLECT_101,\n                                          int derivBorder = BORDER_CONSTANT,\n                                          bool tryReuseInputImage = true );\n\n/** @brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with\npyramids.\n\n@param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.\n@param nextImg second input image or pyramid of the same size and the same type as prevImg.\n@param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be\nsingle-precision floating-point numbers.\n@param nextPts output vector of 2D points (with single-precision floating-point coordinates)\ncontaining the calculated new positions of input features in the second image; when\nOPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.\n@param status output status vector (of unsigned chars); each element of the vector is set to 1 if\nthe flow for the corresponding features has been found, otherwise, it is set to 0.\n@param err output vector of errors; each element of the vector is set to an error for the\ncorresponding feature; the type of the error measure can be set in the flags parameter; if the flow wasn't\nfound then the error is not defined (use the status parameter to find such cases).\n@param winSize size of the search window at each pyramid level.\n@param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single\nlevel), if set to 1, two levels are used, and so on; if pyramids are passed to input then the\nalgorithm will use as many levels as the pyramids have, but no more than maxLevel.\n@param criteria parameter, specifying the termination criteria of the iterative search algorithm\n(after the specified maximum number of iterations criteria.maxCount or when the search window\nmoves by less than criteria.epsilon).\n@param flags operation flags:\n -   **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations, stored in nextPts; if the flag is\n     not set, then prevPts is copied to nextPts and is considered the initial estimate.\n -   **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigen values as an error measure (see\n     minEigThreshold description); if the flag is not set, then the L1 distance between patches\n     around the original and a moved point, divided by the number of pixels in a window, is used as an\n     error measure.\n@param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of\noptical flow equations (this matrix is called a spatial gradient matrix in @cite Bouguet00), divided\nby the number of pixels in a window; if this value is less than minEigThreshold, then a corresponding\nfeature is filtered out and its flow is not processed, so it allows removing bad points and getting a\nperformance boost.\n\nThe function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See\n@cite Bouguet00 . 
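A minimal usage sketch (hedged: prevGray and gray are assumed to be two consecutive 8-bit grayscale frames; all other parameters keep the defaults documented above):\n@code\n    std::vector<Point2f> prevPts, nextPts;\n    std::vector<uchar> status;\n    std::vector<float> err;\n    goodFeaturesToTrack( prevGray, prevPts, 500, 0.01, 10 );  // seed features in the first frame\n    calcOpticalFlowPyrLK( prevGray, gray, prevPts, nextPts, status, err );\n    for( size_t i = 0; i < prevPts.size(); i++ )\n        if( status[i] )  // flow was found for this feature\n            circle( gray, nextPts[i], 3, Scalar(255), -1 );  // e.g. visualize the tracked point\n@endcode\n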
The function is parallelized with the TBB library.\n\n@note\n\n-   An example using the Lucas-Kanade optical flow algorithm can be found at\n    opencv_source_code/samples/cpp/lkdemo.cpp\n-   (Python) An example using the Lucas-Kanade optical flow algorithm can be found at\n    opencv_source_code/samples/python/lk_track.py\n-   (Python) An example using the Lucas-Kanade tracker for homography matching can be found at\n    opencv_source_code/samples/python/lk_homography.py\n */\nCV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,\n                                        InputArray prevPts, InputOutputArray nextPts,\n                                        OutputArray status, OutputArray err,\n                                        Size winSize = Size(21,21), int maxLevel = 3,\n                                        TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),\n                                        int flags = 0, double minEigThreshold = 1e-4 );\n\n/** @brief Computes a dense optical flow using the Gunnar Farneback's algorithm.\n\n@param prev first 8-bit single-channel input image.\n@param next second input image of the same size and the same type as prev.\n@param flow computed flow image that has the same size as prev and type CV_32FC2.\n@param pyr_scale parameter, specifying the image scale (\\<1) to build pyramids for each image;\npyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous\none.\n@param levels number of pyramid layers including the initial image; levels=1 means that no extra\nlayers are created and only the original images are used.\n@param winsize averaging window size; larger values increase the algorithm robustness to image\nnoise and give more chances for fast motion detection, but yield a more blurred motion field.\n@param iterations number of iterations the algorithm does at each pyramid level.\n@param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;\nlarger values mean that the image will be approximated with smoother surfaces, yielding a more\nrobust algorithm and a more blurred motion field, typically poly_n =5 or 7.\n@param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a\nbasis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a\ngood value would be poly_sigma=1.5.\n@param flags operation flags that can be a combination of the following:\n -   **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation.\n -   **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian \\f$\\texttt{winsize}\\times\\texttt{winsize}\\f$\n     filter instead of a box filter of the same size for optical flow estimation; usually, this\n     option gives a more accurate flow than with a box filter, at the cost of lower speed;\n     normally, winsize for a Gaussian window should be set to a larger value to achieve the same\n     level of robustness.\n\nThe function finds an optical flow for each prev pixel using the @cite Farneback2003 algorithm so that\n\n\\f[\\texttt{prev} (y,x)  \\sim \\texttt{next} ( y + \\texttt{flow} (y,x)[1],  x + \\texttt{flow} (y,x)[0])\\f]\n\n@note\n\n-   An example using the optical flow algorithm described by Gunnar Farneback can be found at\n    opencv_source_code/samples/cpp/fback.cpp\n-   (Python) An example using the optical flow algorithm described by Gunnar Farneback can be\n    found at 
opencv_source_code/samples/python/opt_flow.py\n */\nCV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, InputOutputArray flow,\n                                            double pyr_scale, int levels, int winsize,\n                                            int iterations, int poly_n, double poly_sigma,\n                                            int flags );\n\n/** @brief Computes an optimal affine transformation between two 2D point sets.\n\n@param src First input 2D point set stored in std::vector or Mat, or an image stored in Mat.\n@param dst Second input 2D point set of the same size and the same type as A, or another image.\n@param fullAffine If true, the function finds an optimal affine transformation with no additional\nrestrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is\nlimited to combinations of translation, rotation, and uniform scaling (4 degrees of freedom).\n\nThe function finds an optimal affine transform *[A|b]* (a 2 x 3 floating-point matrix) that\nbest approximates the affine transformation between:\n\n*   Two point sets\n*   Two raster images. In this case, the function first finds some features in the src image and\n    finds the corresponding features in the dst image. After that, the problem is reduced to the first\n    case.\nIn the case of point sets, the problem is formulated as follows: you need to find a 2x2 matrix *A* and\na 2x1 vector *b* so that:\n\n\\f[[A^*|b^*] = arg  \\min _{[A|b]}  \\sum _i  \\| \\texttt{dst}[i] - A { \\texttt{src}[i]}^T - b  \\| ^2\\f]\nwhere src[i] and dst[i] are the i-th points in src and dst, respectively.\n\\f$[A|b]\\f$ can be either arbitrary (when fullAffine=true ) or have a form of\n\\f[\\begin{bmatrix} a_{11} & a_{12} & b_1  \\\\ -a_{12} & a_{11} & b_2  \\end{bmatrix}\\f]\nwhen fullAffine=false.\n\n@sa\ngetAffineTransform, getPerspectiveTransform, findHomography\n */\nCV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine );\n\n\nenum\n{\n    MOTION_TRANSLATION = 0,\n    MOTION_EUCLIDEAN   = 1,\n    MOTION_AFFINE      = 2,\n    MOTION_HOMOGRAPHY  = 3\n};\n\n/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .\n\n@param templateImage single-channel template image; CV_8U or CV_32F array.\n@param inputImage single-channel input image which should be warped with the final warpMatrix in\norder to provide an image similar to templateImage, same type as templateImage.\n@param warpMatrix floating-point \\f$2\\times 3\\f$ or \\f$3\\times 3\\f$ mapping matrix (warp).\n@param motionType parameter, specifying the type of motion:\n -   **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is \\f$2\\times 3\\f$ with\n     the first \\f$2\\times 2\\f$ part being the unity matrix and the rest two parameters being\n     estimated.\n -   **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three\n     parameters are estimated; warpMatrix is \\f$2\\times 3\\f$.\n -   **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated;\n     warpMatrix is \\f$2\\times 3\\f$.\n -   **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are\n     estimated; \\`warpMatrix\\` is \\f$3\\times 3\\f$.\n@param criteria parameter, specifying the termination criteria of the ECC algorithm;\ncriteria.epsilon defines the threshold of the increment in the correlation coefficient between two\niterations (a negative criteria.epsilon makes 
criteria.maxCount the only termination criterion).\nDefault values are shown in the declaration above.\n@param inputMask An optional mask to indicate valid values of inputImage.\n\nThe function estimates the optimum transformation (warpMatrix) with respect to the ECC criterion\n(@cite EP08), that is\n\n\\f[\\texttt{warpMatrix} = \\arg\\max_{W} \\texttt{ECC}(\\texttt{templateImage}(x,y),\\texttt{inputImage}(x',y'))\\f]\n\nwhere\n\n\\f[\\begin{bmatrix} x' \\\\ y' \\end{bmatrix} = W \\cdot \\begin{bmatrix} x \\\\ y \\\\ 1 \\end{bmatrix}\\f]\n\n(the equation holds with homogeneous coordinates for homography). It returns the final enhanced\ncorrelation coefficient, that is the correlation coefficient between the template image and the\nfinal warped input image. When a \\f$3\\times 3\\f$ matrix is given with motionType = 0, 1, or 2, the third\nrow is ignored.\n\nUnlike findHomography and estimateRigidTransform, the function findTransformECC implements an\narea-based alignment that builds on intensity similarities. In essence, the function updates the\ninitial transformation that roughly aligns the images. If this information is missing, the identity\nwarp (identity matrix) should be given as input. Note that if images undergo strong\ndisplacements/rotations, an initial transformation that roughly aligns the images is necessary\n(e.g., a simple Euclidean/similarity transform so that the images show approximately the same\ncontent). Use inverse warping in the second image to obtain an image close to the first\none, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV\nsample image_alignment.cpp that demonstrates the use of the function. Note that the function throws\nan exception if the algorithm does not converge.\n\n@sa\nestimateRigidTransform, findHomography\n */\nCV_EXPORTS_W double findTransformECC( InputArray templateImage, InputArray inputImage,\n                                      InputOutputArray warpMatrix, int motionType = MOTION_AFFINE,\n                                      TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001),\n                                      InputArray inputMask = noArray());\n\n/** @brief Kalman filter class.\n\nThe class implements a standard Kalman filter <http://en.wikipedia.org/wiki/Kalman_filter>,\n@cite Welch95 . However, you can modify transitionMatrix, controlMatrix, and measurementMatrix to get\nextended Kalman filter functionality. See the OpenCV sample kalman.cpp.\n\n@note\n\n-   An example using the standard Kalman filter can be found at\n    opencv_source_code/samples/cpp/kalman.cpp\n */\nclass CV_EXPORTS_W KalmanFilter\n{\npublic:\n    /** @brief The constructors.\n\n    @note In the C API, when the CvKalman\\* kalmanFilter structure is no longer needed, it should be released\n    with cvReleaseKalman(&kalmanFilter)\n     */\n    CV_WRAP KalmanFilter();\n    /** @overload\n    @param dynamParams Dimensionality of the state.\n    @param measureParams Dimensionality of the measurement.\n    @param controlParams Dimensionality of the control vector.\n    @param type Type of the created matrices that should be CV_32F or CV_64F.\n    */\n    CV_WRAP KalmanFilter( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );\n\n    /** @brief Re-initializes the Kalman filter. 
The previous content is destroyed.\n\n    @param dynamParams Dimensionality of the state.\n    @param measureParams Dimensionality of the measurement.\n    @param controlParams Dimensionality of the control vector.\n    @param type Type of the created matrices that should be CV_32F or CV_64F.\n     */\n    void init( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );\n\n    /** @brief Computes a predicted state.\n\n    @param control The optional input control\n     */\n    CV_WRAP const Mat& predict( const Mat& control = Mat() );\n\n    /** @brief Updates the predicted state from the measurement.\n\n    @param measurement The measured system parameters\n     */\n    CV_WRAP const Mat& correct( const Mat& measurement );\n\n    CV_PROP_RW Mat statePre;           //!< predicted state (x'(k)): x'(k)=A*x(k-1)+B*u(k)\n    CV_PROP_RW Mat statePost;          //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))\n    CV_PROP_RW Mat transitionMatrix;   //!< state transition matrix (A)\n    CV_PROP_RW Mat controlMatrix;      //!< control matrix (B) (not used if there is no control)\n    CV_PROP_RW Mat measurementMatrix;  //!< measurement matrix (H)\n    CV_PROP_RW Mat processNoiseCov;    //!< process noise covariance matrix (Q)\n    CV_PROP_RW Mat measurementNoiseCov;//!< measurement noise covariance matrix (R)\n    CV_PROP_RW Mat errorCovPre;        //!< a priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q\n    CV_PROP_RW Mat gain;               //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)\n    CV_PROP_RW Mat errorCovPost;       //!< a posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)\n\n    // temporary matrices\n    Mat temp1;\n    Mat temp2;\n    Mat temp3;\n    Mat temp4;\n    Mat temp5;\n};\n\n\nclass CV_EXPORTS_W DenseOpticalFlow : public Algorithm\n{\npublic:\n    /** @brief Calculates an optical flow.\n\n    @param I0 first 8-bit single-channel input image.\n    @param I1 second input image of the same size and the same type as I0.\n    @param flow computed flow image that has the same size as I0 and type CV_32FC2.\n     */\n    CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;\n    /** @brief Releases all inner buffers.\n    */\n    CV_WRAP virtual void collectGarbage() = 0;\n};\n\n/** @brief \"Dual TV L1\" Optical Flow Algorithm.\n\nThe class implements the \"Dual TV L1\" optical flow algorithm described in @cite Zach2007 and\n@cite Javier2012 .\nHere are important members of the class that control the algorithm, which you can set after\nconstructing the class instance:\n\n-   member double tau\n    Time step of the numerical scheme.\n\n-   member double lambda\n    Weight parameter for the data term, attachment parameter. This is the most relevant\n    parameter, which determines the smoothness of the output. The smaller this parameter is,\n    the smoother the solutions we obtain. It depends on the range of motions of the images, so\n    its value should be adapted to each image sequence.\n\n-   member double theta\n    Weight parameter for (u - v)\\^2, tightness parameter. It serves as a link between the\n    attachment and the regularization terms. In theory, it should have a small value in order\n    to maintain both parts in correspondence. 
The method is stable for a large range of values\n    of this parameter.\n\n-   member int nscales\n    Number of scales used to create the pyramid of images.\n\n-   member int warps\n    Number of warpings per scale. Represents the number of times that I1(x+u0) and grad(\n    I1(x+u0) ) are computed per scale. This is a parameter that assures the stability of the\n    method. It also affects the running time, so it is a compromise between speed and\n    accuracy.\n\n-   member double epsilon\n    Stopping criterion threshold used in the numerical scheme, which is a trade-off between\n    precision and running time. A small value will yield more accurate solutions at the\n    expense of a slower convergence.\n\n-   member int iterations\n    Stopping criterion iterations number used in the numerical scheme.\n\nC. Zach, T. Pock and H. Bischof, \"A Duality Based Approach for Realtime TV-L1 Optical Flow\".\nJavier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. \"TV-L1 Optical Flow Estimation\".\n*/\nclass CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow\n{\npublic:\n    //! @brief Time step of the numerical scheme\n    /** @see setTau */\n    virtual double getTau() const = 0;\n    /** @copybrief getTau @see getTau */\n    virtual void setTau(double val) = 0;\n    //! @brief Weight parameter for the data term, attachment parameter\n    /** @see setLambda */\n    virtual double getLambda() const = 0;\n    /** @copybrief getLambda @see getLambda */\n    virtual void setLambda(double val) = 0;\n    //! @brief Weight parameter for (u - v)^2, tightness parameter\n    /** @see setTheta */\n    virtual double getTheta() const = 0;\n    /** @copybrief getTheta @see getTheta */\n    virtual void setTheta(double val) = 0;\n    //! @brief coefficient for additional illumination variation term\n    /** @see setGamma */\n    virtual double getGamma() const = 0;\n    /** @copybrief getGamma @see getGamma */\n    virtual void setGamma(double val) = 0;\n    //! @brief Number of scales used to create the pyramid of images\n    /** @see setScalesNumber */\n    virtual int getScalesNumber() const = 0;\n    /** @copybrief getScalesNumber @see getScalesNumber */\n    virtual void setScalesNumber(int val) = 0;\n    //! @brief Number of warpings per scale\n    /** @see setWarpingsNumber */\n    virtual int getWarpingsNumber() const = 0;\n    /** @copybrief getWarpingsNumber @see getWarpingsNumber */\n    virtual void setWarpingsNumber(int val) = 0;\n    //! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time\n    /** @see setEpsilon */\n    virtual double getEpsilon() const = 0;\n    /** @copybrief getEpsilon @see getEpsilon */\n    virtual void setEpsilon(double val) = 0;\n    //! @brief Inner iterations (between outlier filtering) used in the numerical scheme\n    /** @see setInnerIterations */\n    virtual int getInnerIterations() const = 0;\n    /** @copybrief getInnerIterations @see getInnerIterations */\n    virtual void setInnerIterations(int val) = 0;\n    //! @brief Outer iterations (number of inner loops) used in the numerical scheme\n    /** @see setOuterIterations */\n    virtual int getOuterIterations() const = 0;\n    /** @copybrief getOuterIterations @see getOuterIterations */\n    virtual void setOuterIterations(int val) = 0;\n    //! 
@brief Use initial flow\n    /** @see setUseInitialFlow */\n    virtual bool getUseInitialFlow() const = 0;\n    /** @copybrief getUseInitialFlow @see getUseInitialFlow */\n    virtual void setUseInitialFlow(bool val) = 0;\n    //! @brief Step between scales (<1)\n    /** @see setScaleStep */\n    virtual double getScaleStep() const = 0;\n    /** @copybrief getScaleStep @see getScaleStep */\n    virtual void setScaleStep(double val) = 0;\n    //! @brief Median filter kernel size (1 = no filter) (3 or 5)\n    /** @see setMedianFiltering */\n    virtual int getMedianFiltering() const = 0;\n    /** @copybrief getMedianFiltering @see getMedianFiltering */\n    virtual void setMedianFiltering(int val) = 0;\n};\n\n/** @brief Creates an instance of cv::DualTVL1OpticalFlow\n*/\nCV_EXPORTS_W Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();\n\n//! @} video_track\n\n} // cv\n\n#endif\n"
  },
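  {
    "path": "samples/hypothetical/tracking_usage_sketch.cpp",
    "content": "// ---------------------------------------------------------------------------\n// Editor's note: hypothetical usage sketch, NOT part of OpenCV or of the\n// original project. It illustrates the C++ tracking APIs declared in\n// opencv2/video/tracking.hpp above: calcOpticalFlowPyrLK,\n// calcOpticalFlowFarneback, findTransformECC and KalmanFilter. The frame\n// arguments, sizes and tuning constants below are assumptions, not\n// recommendations.\n// ---------------------------------------------------------------------------\n#include <opencv2/video/tracking.hpp>\n#include <opencv2/imgproc.hpp>\n#include <vector>\n\nvoid trackingSketch(const cv::Mat& prevGray, const cv::Mat& nextGray)\n{\n    // 1) Sparse optical flow: track strong corners from prevGray to nextGray.\n    std::vector<cv::Point2f> prevPts, nextPts;\n    cv::goodFeaturesToTrack(prevGray, prevPts, 200, 0.01, 10);\n    std::vector<unsigned char> status;\n    std::vector<float> err;\n    if (!prevPts.empty())\n        cv::calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);\n\n    // 2) Dense optical flow (Farneback): one CV_32FC2 flow vector per pixel.\n    cv::Mat flow;\n    cv::calcOpticalFlowFarneback(prevGray, nextGray, flow,\n                                 0.5, // pyr_scale: each level is half the size\n                                 3,   // levels\n                                 15,  // winsize\n                                 3,   // iterations\n                                 5,   // poly_n\n                                 1.1, // poly_sigma, matching poly_n = 5\n                                 0);  // flags\n\n    // 3) ECC alignment: start from the identity warp and refine it in place.\n    //    findTransformECC throws if the algorithm does not converge.\n    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);\n    cv::findTransformECC(prevGray, nextGray, warp, cv::MOTION_EUCLIDEAN);\n\n    // 4) Kalman filter with a constant-velocity 2-D point model:\n    //    state = (x, y, vx, vy), measurement = (x, y).\n    cv::KalmanFilter kf(4, 2, 0);\n    kf.transitionMatrix = (cv::Mat_<float>(4, 4) << 1, 0, 1, 0,\n                                                    0, 1, 0, 1,\n                                                    0, 0, 1, 0,\n                                                    0, 0, 0, 1);\n    cv::setIdentity(kf.measurementMatrix);\n    cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(1e-4));\n    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-1));\n    cv::Mat prediction = kf.predict();                             // x'(k)\n    cv::Mat measurement = (cv::Mat_<float>(2, 1) << 100.f, 100.f); // placeholder z(k)\n    kf.correct(measurement);                                       // fold z(k) into x(k)\n    (void)prediction;\n}\n"
  },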
  {
    "path": "src/3rdparty/opencv/include/opencv2/video/tracking_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_TRACKING_C_H__\n#define __OPENCV_TRACKING_C_H__\n\n#include \"opencv2/imgproc/types_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/** @addtogroup video_c\n  @{\n*/\n\n/****************************************************************************************\\\n*                                  Motion Analysis                                       *\n\\****************************************************************************************/\n\n/************************************ optical flow ***************************************/\n\n#define CV_LKFLOW_PYR_A_READY       1\n#define CV_LKFLOW_PYR_B_READY       2\n#define CV_LKFLOW_INITIAL_GUESSES   4\n#define CV_LKFLOW_GET_MIN_EIGENVALS 8\n\n/* It is Lucas & Kanade method, modified to use pyramids.\n   Also it does several iterations to get optical flow for\n   every point at every pyramid level.\n   Calculates optical flow between two images for certain set of points (i.e.\n   it is a \"sparse\" optical flow, which is opposite to the previous 3 methods) */\nCVAPI(void)  cvCalcOpticalFlowPyrLK( const CvArr*  prev, const CvArr*  curr,\n                                     CvArr*  prev_pyr, CvArr*  curr_pyr,\n                                     const CvPoint2D32f* prev_features,\n 
                                    CvPoint2D32f* curr_features,\n                                     int       count,\n                                     CvSize    win_size,\n                                     int       level,\n                                     char*     status,\n                                     float*    track_error,\n                                     CvTermCriteria criteria,\n                                     int       flags );\n\n\n/* Modification of a previous sparse optical flow algorithm to calculate\n   affine flow */\nCVAPI(void)  cvCalcAffineFlowPyrLK( const CvArr*  prev, const CvArr*  curr,\n                                    CvArr*  prev_pyr, CvArr*  curr_pyr,\n                                    const CvPoint2D32f* prev_features,\n                                    CvPoint2D32f* curr_features,\n                                    float* matrices, int  count,\n                                    CvSize win_size, int  level,\n                                    char* status, float* track_error,\n                                    CvTermCriteria criteria, int flags );\n\n/* Estimate rigid transformation between 2 images or 2 point sets */\nCVAPI(int)  cvEstimateRigidTransform( const CvArr* A, const CvArr* B,\n                                      CvMat* M, int full_affine );\n\n/* Estimate optical flow for each pixel using the two-frame G. Farneback algorithm */\nCVAPI(void) cvCalcOpticalFlowFarneback( const CvArr* prev, const CvArr* next,\n                                        CvArr* flow, double pyr_scale, int levels,\n                                        int winsize, int iterations, int poly_n,\n                                        double poly_sigma, int flags );\n\n/********************************* motion templates *************************************/\n\n/****************************************************************************************\\\n*        All the motion template functions work only with single channel images.         
*\n*        Silhouette image must have depth IPL_DEPTH_8U or IPL_DEPTH_8S                   *\n*        Motion history image must have depth IPL_DEPTH_32F,                             *\n*        Gradient mask - IPL_DEPTH_8U or IPL_DEPTH_8S,                                   *\n*        Motion orientation image - IPL_DEPTH_32F                                        *\n*        Segmentation mask - IPL_DEPTH_32F                                               *\n*        All the angles are in degrees, all the times are in milliseconds                *\n\\****************************************************************************************/\n\n/* Updates motion history image given motion silhouette */\nCVAPI(void)    cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi,\n                                      double timestamp, double duration );\n\n/* Calculates gradient of the motion history image and fills\n   a mask indicating where the gradient is valid */\nCVAPI(void)    cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation,\n                                     double delta1, double delta2,\n                                     int aperture_size CV_DEFAULT(3));\n\n/* Calculates average motion direction within a selected motion region\n   (region can be selected by setting ROIs and/or by composing a valid gradient mask\n   with the region mask) */\nCVAPI(double)  cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask,\n                                        const CvArr* mhi, double timestamp,\n                                        double duration );\n\n/* Splits a motion history image into a few parts corresponding to separate independent motions\n   (e.g. left hand, right hand) */\nCVAPI(CvSeq*)  cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask,\n                                CvMemStorage* storage,\n                                double timestamp, double seg_thresh );\n\n/****************************************************************************************\\\n*                                       Tracking                                         *\n\\****************************************************************************************/\n\n/* Implements CAMSHIFT algorithm - determines object position, size and orientation\n   from the object histogram back project (extension of meanshift) */\nCVAPI(int)  cvCamShift( const CvArr* prob_image, CvRect  window,\n                        CvTermCriteria criteria, CvConnectedComp* comp,\n                        CvBox2D* box CV_DEFAULT(NULL) );\n\n/* Implements MeanShift algorithm - determines object position\n   from the object histogram back project */\nCVAPI(int)  cvMeanShift( const CvArr* prob_image, CvRect  window,\n                         CvTermCriteria criteria, CvConnectedComp* comp );\n\n/*\nstandard Kalman filter (in G. Welch' and G. 
Bishop's notation):\n\n  x(k)=A*x(k-1)+B*u(k)+w(k)  p(w)~N(0,Q)\n  z(k)=H*x(k)+v(k),   p(v)~N(0,R)\n*/\ntypedef struct CvKalman\n{\n    int MP;                     /* number of measurement vector dimensions */\n    int DP;                     /* number of state vector dimensions */\n    int CP;                     /* number of control vector dimensions */\n\n    /* backward compatibility fields */\n#if 1\n    float* PosterState;         /* =state_pre->data.fl */\n    float* PriorState;          /* =state_post->data.fl */\n    float* DynamMatr;           /* =transition_matrix->data.fl */\n    float* MeasurementMatr;     /* =measurement_matrix->data.fl */\n    float* MNCovariance;        /* =measurement_noise_cov->data.fl */\n    float* PNCovariance;        /* =process_noise_cov->data.fl */\n    float* KalmGainMatr;        /* =gain->data.fl */\n    float* PriorErrorCovariance;/* =error_cov_pre->data.fl */\n    float* PosterErrorCovariance;/* =error_cov_post->data.fl */\n    float* Temp1;               /* temp1->data.fl */\n    float* Temp2;               /* temp2->data.fl */\n#endif\n\n    CvMat* state_pre;           /* predicted state (x'(k)):\n                                    x(k)=A*x(k-1)+B*u(k) */\n    CvMat* state_post;          /* corrected state (x(k)):\n                                    x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) */\n    CvMat* transition_matrix;   /* state transition matrix (A) */\n    CvMat* control_matrix;      /* control matrix (B)\n                                   (it is not used if there is no control)*/\n    CvMat* measurement_matrix;  /* measurement matrix (H) */\n    CvMat* process_noise_cov;   /* process noise covariance matrix (Q) */\n    CvMat* measurement_noise_cov; /* measurement noise covariance matrix (R) */\n    CvMat* error_cov_pre;       /* priori error estimate covariance matrix (P'(k)):\n                                    P'(k)=A*P(k-1)*At + Q)*/\n    CvMat* gain;                /* Kalman gain matrix (K(k)):\n                                    K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)*/\n    CvMat* error_cov_post;      /* posteriori error estimate covariance matrix (P(k)):\n                                    P(k)=(I-K(k)*H)*P'(k) */\n    CvMat* temp1;               /* temporary matrices */\n    CvMat* temp2;\n    CvMat* temp3;\n    CvMat* temp4;\n    CvMat* temp5;\n} CvKalman;\n\n/* Creates Kalman filter and sets A, B, Q, R and state to some initial values */\nCVAPI(CvKalman*) cvCreateKalman( int dynam_params, int measure_params,\n                                 int control_params CV_DEFAULT(0));\n\n/* Releases Kalman filter state */\nCVAPI(void)  cvReleaseKalman( CvKalman** kalman);\n\n/* Updates Kalman filter by time (predicts future state of the system) */\nCVAPI(const CvMat*)  cvKalmanPredict( CvKalman* kalman,\n                                      const CvMat* control CV_DEFAULT(NULL));\n\n/* Updates Kalman filter by measurement\n   (corrects state of the system and internal matrices) */\nCVAPI(const CvMat*)  cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement );\n\n#define cvKalmanUpdateByTime  cvKalmanPredict\n#define cvKalmanUpdateByMeasurement cvKalmanCorrect\n\n/** @} video_c */\n\n#ifdef __cplusplus\n} // extern \"C\"\n#endif\n\n\n#endif // __OPENCV_TRACKING_C_H__\n"
  },
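  {
    "path": "samples/hypothetical/kalman_c_api_sketch.cpp",
    "content": "// ---------------------------------------------------------------------------\n// Editor's note: hypothetical usage sketch, NOT part of OpenCV or of the\n// original project. It exercises the legacy C Kalman API declared in\n// opencv2/video/tracking_c.h above; the measurement value is a placeholder.\n// ---------------------------------------------------------------------------\n#include <opencv2/video/tracking_c.h>\n#include <stddef.h>\n\nvoid kalmanCApiSketch(void)\n{\n    // 1-D state (DP = 1), 1-D measurement (MP = 1), no control input.\n    CvKalman* kalman = cvCreateKalman(1, 1, 0);\n\n    // A = 1, H = 1: a constant state that is observed directly.\n    cvSetIdentity(kalman->transition_matrix, cvRealScalar(1));\n    cvSetIdentity(kalman->measurement_matrix, cvRealScalar(1));\n\n    // Predict, then correct with a single measurement.\n    const CvMat* prediction = cvKalmanPredict(kalman, NULL);\n    float z = 0.5f;\n    CvMat measurement = cvMat(1, 1, CV_32FC1, &z);\n    const CvMat* corrected = cvKalmanCorrect(kalman, &measurement);\n\n    (void)prediction;\n    (void)corrected;\n    cvReleaseKalman(&kalman); // required cleanup for the C API\n}\n"
  },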
  {
    "path": "src/3rdparty/opencv/include/opencv2/video/video.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/video.hpp\"\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/video.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEO_HPP__\n#define __OPENCV_VIDEO_HPP__\n\n/**\n  @defgroup video Video Analysis\n  @{\n    @defgroup video_motion Motion Analysis\n    @defgroup video_track Object Tracking\n    @defgroup video_c C API\n  @}\n*/\n\n#include \"opencv2/video/tracking.hpp\"\n#include \"opencv2/video/background_segm.hpp\"\n\n#ifndef DISABLE_OPENCV_24_COMPATIBILITY\n#include \"opencv2/video/tracking_c.h\"\n#endif\n\n#endif //__OPENCV_VIDEO_HPP__\n"
  },
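  {
    "path": "samples/hypothetical/video_module_include_sketch.cpp",
    "content": "// Editor's note: hypothetical sketch, NOT part of OpenCV or of the original\n// project. opencv2/video.hpp above aggregates the video module: it includes\n// tracking.hpp and background_segm.hpp, and also pulls in the legacy C\n// header tracking_c.h unless DISABLE_OPENCV_24_COMPATIBILITY is defined\n// before inclusion, as sketched here.\n#define DISABLE_OPENCV_24_COMPATIBILITY // opt out of the 2.4-era C API\n#include <opencv2/video.hpp>\n\n// With the macro defined, only the C++ interfaces (cv::KalmanFilter,\n// cv::calcOpticalFlowPyrLK, ...) are declared; tracking_c.h is skipped.\nint main() { cv::KalmanFilter kf(4, 2); return 0; }\n"
  },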
  {
    "path": "src/3rdparty/opencv/include/opencv2/videoio/cap_ios.h",
    "content": "/*  For iOS video I/O\n *  by Eduard Feicho on 29/07/12\n *  Copyright 2012. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice,\n *    this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n *    this list of conditions and the following disclaimer in the documentation\n *    and/or other materials provided with the distribution.\n * 3. The name of the author may not be used to endorse or promote products\n *    derived from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR \"AS IS\" AND ANY EXPRESS OR IMPLIED\n * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\n * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#import <UIKit/UIKit.h>\n#import <Accelerate/Accelerate.h>\n#import <AVFoundation/AVFoundation.h>\n#import <ImageIO/ImageIO.h>\n#include \"opencv2/core.hpp\"\n\n//! @addtogroup videoio_ios\n//! @{\n\n/////////////////////////////////////// CvAbstractCamera /////////////////////////////////////\n\n@class CvAbstractCamera;\n\n@interface CvAbstractCamera : NSObject\n{\n    AVCaptureSession* captureSession;\n    AVCaptureConnection* videoCaptureConnection;\n    AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;\n\n    UIDeviceOrientation currentDeviceOrientation;\n\n    BOOL cameraAvailable;\n    BOOL captureSessionLoaded;\n    BOOL running;\n    BOOL useAVCaptureVideoPreviewLayer;\n\n    AVCaptureDevicePosition defaultAVCaptureDevicePosition;\n    AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;\n    NSString *const defaultAVCaptureSessionPreset;\n\n    int defaultFPS;\n\n    UIView* parentView;\n\n    int imageWidth;\n    int imageHeight;\n}\n\n@property (nonatomic, retain) AVCaptureSession* captureSession;\n@property (nonatomic, retain) AVCaptureConnection* videoCaptureConnection;\n\n@property (nonatomic, readonly) BOOL running;\n@property (nonatomic, readonly) BOOL captureSessionLoaded;\n\n@property (nonatomic, assign) int defaultFPS;\n@property (nonatomic, readonly) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;\n@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition;\n@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;\n@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer;\n@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset;\n\n@property (nonatomic, assign) int imageWidth;\n@property (nonatomic, assign) int imageHeight;\n\n@property (nonatomic, retain) UIView* parentView;\n\n- (void)start;\n- (void)stop;\n- (void)switchCameras;\n\n- (id)initWithParentView:(UIView*)parent;\n\n- (void)createCaptureOutput;\n- (void)createVideoPreviewLayer;\n- 
(void)updateOrientation;\n\n- (void)lockFocus;\n- (void)unlockFocus;\n- (void)lockExposure;\n- (void)unlockExposure;\n- (void)lockBalance;\n- (void)unlockBalance;\n\n@end\n\n///////////////////////////////// CvVideoCamera ///////////////////////////////////////////\n\n@class CvVideoCamera;\n\n@protocol CvVideoCameraDelegate <NSObject>\n\n#ifdef __cplusplus\n// delegate method for processing image frames\n- (void)processImage:(cv::Mat&)image;\n#endif\n\n@end\n\n@interface CvVideoCamera : CvAbstractCamera<AVCaptureVideoDataOutputSampleBufferDelegate>\n{\n    AVCaptureVideoDataOutput *videoDataOutput;\n\n    dispatch_queue_t videoDataOutputQueue;\n    CALayer *customPreviewLayer;\n\n    BOOL grayscaleMode;\n\n    BOOL recordVideo;\n    BOOL rotateVideo;\n    AVAssetWriterInput* recordAssetWriterInput;\n    AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;\n    AVAssetWriter* recordAssetWriter;\n\n    CMTime lastSampleTime;\n\n}\n\n@property (nonatomic, assign) id<CvVideoCameraDelegate> delegate;\n@property (nonatomic, assign) BOOL grayscaleMode;\n\n@property (nonatomic, assign) BOOL recordVideo;\n@property (nonatomic, assign) BOOL rotateVideo;\n@property (nonatomic, retain) AVAssetWriterInput* recordAssetWriterInput;\n@property (nonatomic, retain) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;\n@property (nonatomic, retain) AVAssetWriter* recordAssetWriter;\n\n- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;\n- (void)layoutPreviewLayer;\n- (void)saveVideo;\n- (NSURL *)videoFileURL;\n- (NSString *)videoFileString;\n\n\n@end\n\n///////////////////////////////// CvPhotoCamera ///////////////////////////////////////////\n\n@class CvPhotoCamera;\n\n@protocol CvPhotoCameraDelegate <NSObject>\n\n- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image;\n- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera;\n\n@end\n\n@interface CvPhotoCamera : CvAbstractCamera\n{\n    AVCaptureStillImageOutput *stillImageOutput;\n}\n\n@property (nonatomic, assign) id<CvPhotoCameraDelegate> delegate;\n\n- (void)takePicture;\n\n@end\n\n//! @} videoio_ios\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/videoio/videoio.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Copyright (C) 2013, OpenCV Foundation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifdef __OPENCV_BUILD\n#error this is a compatibility header which should not be used inside the OpenCV library\n#endif\n\n#include \"opencv2/videoio.hpp\"\n"
  },
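  {
    "path": "samples/hypothetical/capture_c_api_sketch.cpp",
    "content": "// ---------------------------------------------------------------------------\n// Editor's note: hypothetical usage sketch, NOT part of OpenCV or of the\n// original project. It exercises the legacy C capture API declared in\n// opencv2/videoio/videoio_c.h below; the camera index, resolution and frame\n// count are placeholder assumptions, and cvSetCaptureProperty is declared\n// further down that header, beyond the excerpt shown here.\n// ---------------------------------------------------------------------------\n#include <opencv2/videoio/videoio_c.h>\n\nvoid captureCApiSketch(void)\n{\n    // index = camera_index + domain_offset; CV_CAP_ANY autodetects a backend.\n    CvCapture* capture = cvCreateCameraCapture(CV_CAP_ANY);\n    if (!capture)\n        return;\n\n    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640);\n    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480);\n\n    for (int i = 0; i < 100; i++)\n    {\n        IplImage* frame = cvQueryFrame(capture); // do NOT release or modify\n        if (!frame)\n            break;\n        // ... per-frame processing would go here ...\n    }\n    cvReleaseCapture(&capture);\n}\n"
  },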
  {
    "path": "src/3rdparty/opencv/include/opencv2/videoio/videoio_c.h",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                        Intel License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000, Intel Corporation, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of Intel Corporation may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOIO_H__\n#define __OPENCV_VIDEOIO_H__\n\n#include \"opencv2/core/core_c.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/**\n  @addtogroup videoio_c\n  @{\n*/\n\n/****************************************************************************************\\\n*                         Working with Video Files and Cameras                           *\n\\****************************************************************************************/\n\n/* \"black box\" capture structure */\ntypedef struct CvCapture CvCapture;\n\n/* start capturing frames from video file */\nCVAPI(CvCapture*) cvCreateFileCapture( const char* filename );\n\n/* start capturing frames from video file. 
allows specifying a preferred API to use */\nCVAPI(CvCapture*) cvCreateFileCaptureWithPreference( const char* filename , int apiPreference);\n\nenum\n{\n    CV_CAP_ANY      =0,     // autodetect\n\n    CV_CAP_MIL      =100,   // MIL proprietary drivers\n\n    CV_CAP_VFW      =200,   // platform native\n    CV_CAP_V4L      =200,\n    CV_CAP_V4L2     =200,\n\n    CV_CAP_FIREWARE =300,   // IEEE 1394 drivers\n    CV_CAP_FIREWIRE =300,\n    CV_CAP_IEEE1394 =300,\n    CV_CAP_DC1394   =300,\n    CV_CAP_CMU1394  =300,\n\n    CV_CAP_STEREO   =400,   // TYZX proprietary drivers\n    CV_CAP_TYZX     =400,\n    CV_TYZX_LEFT    =400,\n    CV_TYZX_RIGHT   =401,\n    CV_TYZX_COLOR   =402,\n    CV_TYZX_Z       =403,\n\n    CV_CAP_QT       =500,   // QuickTime\n\n    CV_CAP_UNICAP   =600,   // Unicap drivers\n\n    CV_CAP_DSHOW    =700,   // DirectShow (via videoInput)\n    CV_CAP_MSMF     =1400,  // Microsoft Media Foundation (via videoInput)\n\n    CV_CAP_PVAPI    =800,   // PvAPI, Prosilica GigE SDK\n\n    CV_CAP_OPENNI   =900,   // OpenNI (for Kinect)\n    CV_CAP_OPENNI_ASUS =910,   // OpenNI (for Asus Xtion)\n\n    CV_CAP_ANDROID  =1000,  // Android - not used\n    CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera - not used\n    CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera - not used\n\n    CV_CAP_XIAPI    =1100,   // XIMEA Camera API\n\n    CV_CAP_AVFOUNDATION = 1200,  // AVFoundation framework for iOS (OS X Lion will have the same API)\n\n    CV_CAP_GIGANETIX = 1300,  // Smartek Giganetix GigEVisionSDK\n\n    CV_CAP_INTELPERC = 1500, // Intel Perceptual Computing\n\n    CV_CAP_OPENNI2 = 1600,   // OpenNI2 (for Kinect)\n    CV_CAP_GPHOTO2 = 1700,\n    CV_CAP_GSTREAMER = 1800, // GStreamer\n    CV_CAP_FFMPEG = 1900,    // FFMPEG\n    CV_CAP_IMAGES = 2000     // OpenCV Image Sequence (e.g. img_%02d.jpg)\n};\n\n/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */\nCVAPI(CvCapture*) cvCreateCameraCapture( int index );\n\n/* grab a frame, return 1 on success, 0 on fail.\n  this function is thought to be fast               */\nCVAPI(int) cvGrabFrame( CvCapture* capture );\n\n/* get the frame grabbed with cvGrabFrame(..)\n  This function may apply some frame processing like\n  frame decompression, flipping etc.\n  !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */\nCVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) );\n\n/* Just a combination of cvGrabFrame and cvRetrieveFrame\n   !!!DO NOT RELEASE or MODIFY the retrieved frame!!!      
*/\nCVAPI(IplImage*) cvQueryFrame( CvCapture* capture );\n\n/* stop capturing/reading and free resources */\nCVAPI(void) cvReleaseCapture( CvCapture** capture );\n\nenum\n{\n    // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode)\n    // every feature can have only one mode turned on at a time\n    CV_CAP_PROP_DC1394_OFF         = -4,  //turn the feature off (not controlled manually nor automatically)\n    CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user\n    CV_CAP_PROP_DC1394_MODE_AUTO = -2,\n    CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,\n    CV_CAP_PROP_POS_MSEC       =0,\n    CV_CAP_PROP_POS_FRAMES     =1,\n    CV_CAP_PROP_POS_AVI_RATIO  =2,\n    CV_CAP_PROP_FRAME_WIDTH    =3,\n    CV_CAP_PROP_FRAME_HEIGHT   =4,\n    CV_CAP_PROP_FPS            =5,\n    CV_CAP_PROP_FOURCC         =6,\n    CV_CAP_PROP_FRAME_COUNT    =7,\n    CV_CAP_PROP_FORMAT         =8,\n    CV_CAP_PROP_MODE           =9,\n    CV_CAP_PROP_BRIGHTNESS    =10,\n    CV_CAP_PROP_CONTRAST      =11,\n    CV_CAP_PROP_SATURATION    =12,\n    CV_CAP_PROP_HUE           =13,\n    CV_CAP_PROP_GAIN          =14,\n    CV_CAP_PROP_EXPOSURE      =15,\n    CV_CAP_PROP_CONVERT_RGB   =16,\n    CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17,\n    CV_CAP_PROP_RECTIFICATION =18,\n    CV_CAP_PROP_MONOCHROME    =19,\n    CV_CAP_PROP_SHARPNESS     =20,\n    CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,\n                                   // user can adjust refernce level\n                                   // using this feature\n    CV_CAP_PROP_GAMMA         =22,\n    CV_CAP_PROP_TEMPERATURE   =23,\n    CV_CAP_PROP_TRIGGER       =24,\n    CV_CAP_PROP_TRIGGER_DELAY =25,\n    CV_CAP_PROP_WHITE_BALANCE_RED_V =26,\n    CV_CAP_PROP_ZOOM          =27,\n    CV_CAP_PROP_FOCUS         =28,\n    CV_CAP_PROP_GUID          =29,\n    CV_CAP_PROP_ISO_SPEED     =30,\n    CV_CAP_PROP_MAX_DC1394    =31,\n    CV_CAP_PROP_BACKLIGHT     =32,\n    CV_CAP_PROP_PAN           =33,\n    CV_CAP_PROP_TILT          =34,\n    CV_CAP_PROP_ROLL          =35,\n    CV_CAP_PROP_IRIS          =36,\n    CV_CAP_PROP_SETTINGS      =37,\n    CV_CAP_PROP_BUFFERSIZE    =38,\n    CV_CAP_PROP_AUTOFOCUS     =39,\n    CV_CAP_PROP_SAR_NUM       =40,\n    CV_CAP_PROP_SAR_DEN       =41,\n\n    CV_CAP_PROP_AUTOGRAB      =1024, // property for videoio class CvCapture_Android only\n    CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed\n    CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed\n\n    // OpenNI map generators\n    CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,\n    CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,\n    CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR,\n\n    // Properties of cameras available through OpenNI interfaces\n    CV_CAP_PROP_OPENNI_OUTPUT_MODE     = 100,\n    CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm\n    CV_CAP_PROP_OPENNI_BASELINE        = 102, // in mm\n    CV_CAP_PROP_OPENNI_FOCAL_LENGTH    = 103, // in pixels\n    CV_CAP_PROP_OPENNI_REGISTRATION    = 104, // flag\n    CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map\n                                                                          // by changing depth generator's view point (if the flag is \"on\") or\n                                         
                                 // sets this view point to its normal one (if the flag is \"off\").\n    CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,\n    CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE   = 106,\n    CV_CAP_PROP_OPENNI_CIRCLE_BUFFER     = 107,\n    CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,\n\n    CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,\n    CV_CAP_PROP_OPENNI2_SYNC = 110,\n    CV_CAP_PROP_OPENNI2_MIRROR = 111,\n\n    CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT         = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,\n    CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE     = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,\n    CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE        = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE,\n    CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH    = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,\n    CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION    = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,\n    CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,\n\n    // Properties of cameras available through GStreamer interface\n    CV_CAP_GSTREAMER_QUEUE_LENGTH           = 200, // default is 1\n\n    // PVAPI\n    CV_CAP_PROP_PVAPI_MULTICASTIP           = 300, // ip for anable multicast master mode. 0 for disable multicast\n    CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated\n    CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL  = 302, // Horizontal sub-sampling of the image\n    CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL    = 303, // Vertical sub-sampling of the image\n    CV_CAP_PROP_PVAPI_BINNINGX              = 304, // Horizontal binning factor\n    CV_CAP_PROP_PVAPI_BINNINGY              = 305, // Vertical binning factor\n    CV_CAP_PROP_PVAPI_PIXELFORMAT           = 306, // Pixel format\n\n    // Properties of cameras available through XIMEA SDK interface\n    CV_CAP_PROP_XI_DOWNSAMPLING                                 = 400, // Change image resolution by binning or skipping.\n    CV_CAP_PROP_XI_DATA_FORMAT                                  = 401, // Output data format.\n    CV_CAP_PROP_XI_OFFSET_X                                     = 402, // Horizontal offset from the origin to the area of interest (in pixels).\n    CV_CAP_PROP_XI_OFFSET_Y                                     = 403, // Vertical offset from the origin to the area of interest (in pixels).\n    CV_CAP_PROP_XI_TRG_SOURCE                                   = 404, // Defines source of trigger.\n    CV_CAP_PROP_XI_TRG_SOFTWARE                                 = 405, // Generates an internal trigger. 
PRM_TRG_SOURCE must be set to TRG_SOFTWARE.\n    CV_CAP_PROP_XI_GPI_SELECTOR                                 = 406, // Selects general purpose input\n    CV_CAP_PROP_XI_GPI_MODE                                     = 407, // Set general purpose input mode\n    CV_CAP_PROP_XI_GPI_LEVEL                                    = 408, // Get general purpose level\n    CV_CAP_PROP_XI_GPO_SELECTOR                                 = 409, // Selects general purpose output\n    CV_CAP_PROP_XI_GPO_MODE                                     = 410, // Set general purpose output mode\n    CV_CAP_PROP_XI_LED_SELECTOR                                 = 411, // Selects camera signalling LED\n    CV_CAP_PROP_XI_LED_MODE                                     = 412, // Define camera signalling LED functionality\n    CV_CAP_PROP_XI_MANUAL_WB                                    = 413, // Calculates White Balance(must be called during acquisition)\n    CV_CAP_PROP_XI_AUTO_WB                                      = 414, // Automatic white balance\n    CV_CAP_PROP_XI_AEAG                                         = 415, // Automatic exposure/gain\n    CV_CAP_PROP_XI_EXP_PRIORITY                                 = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).\n    CV_CAP_PROP_XI_AE_MAX_LIMIT                                 = 417, // Maximum limit of exposure in AEAG procedure\n    CV_CAP_PROP_XI_AG_MAX_LIMIT                                 = 418,  // Maximum limit of gain in AEAG procedure\n    CV_CAP_PROP_XI_AEAG_LEVEL                                   = 419, // Average intensity of output signal AEAG should achieve(in %)\n    CV_CAP_PROP_XI_TIMEOUT                                      = 420, // Image capture timeout in milliseconds\n    CV_CAP_PROP_XI_EXPOSURE                                     = 421, // Exposure time in microseconds\n    CV_CAP_PROP_XI_EXPOSURE_BURST_COUNT                         = 422, // Sets the number of times of exposure in one frame.\n    CV_CAP_PROP_XI_GAIN_SELECTOR                                = 423, // Gain selector for parameter Gain allows to select different type of gains.\n    CV_CAP_PROP_XI_GAIN                                         = 424, // Gain in dB\n    CV_CAP_PROP_XI_DOWNSAMPLING_TYPE                            = 426, // Change image downsampling type.\n    CV_CAP_PROP_XI_BINNING_SELECTOR                             = 427, // Binning engine selector.\n    CV_CAP_PROP_XI_BINNING_VERTICAL                             = 428, // Vertical Binning - number of vertical photo-sensitive cells to combine together.\n    CV_CAP_PROP_XI_BINNING_HORIZONTAL                           = 429, // Horizontal Binning - number of horizontal photo-sensitive cells to combine together.\n    CV_CAP_PROP_XI_BINNING_PATTERN                              = 430, // Binning pattern type.\n    CV_CAP_PROP_XI_DECIMATION_SELECTOR                          = 431, // Decimation engine selector.\n    CV_CAP_PROP_XI_DECIMATION_VERTICAL                          = 432, // Vertical Decimation - vertical sub-sampling of the image - reduces the vertical resolution of the image by the specified vertical decimation factor.\n    CV_CAP_PROP_XI_DECIMATION_HORIZONTAL                        = 433, // Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor.\n    CV_CAP_PROP_XI_DECIMATION_PATTERN                           = 434, // Decimation pattern type.\n    CV_CAP_PROP_XI_IMAGE_DATA_FORMAT                            = 435, // 
Output data format.\n    CV_CAP_PROP_XI_SHUTTER_TYPE                                 = 436, // Change sensor shutter type(CMOS sensor).\n    CV_CAP_PROP_XI_SENSOR_TAPS                                  = 437, // Number of taps\n    CV_CAP_PROP_XI_AEAG_ROI_OFFSET_X                            = 439, // Automatic exposure/gain ROI offset X\n    CV_CAP_PROP_XI_AEAG_ROI_OFFSET_Y                            = 440, // Automatic exposure/gain ROI offset Y\n    CV_CAP_PROP_XI_AEAG_ROI_WIDTH                               = 441, // Automatic exposure/gain ROI Width\n    CV_CAP_PROP_XI_AEAG_ROI_HEIGHT                              = 442, // Automatic exposure/gain ROI Height\n    CV_CAP_PROP_XI_BPC                                          = 445, // Correction of bad pixels\n    CV_CAP_PROP_XI_WB_KR                                        = 448, // White balance red coefficient\n    CV_CAP_PROP_XI_WB_KG                                        = 449, // White balance green coefficient\n    CV_CAP_PROP_XI_WB_KB                                        = 450, // White balance blue coefficient\n    CV_CAP_PROP_XI_WIDTH                                        = 451, // Width of the Image provided by the device (in pixels).\n    CV_CAP_PROP_XI_HEIGHT                                       = 452, // Height of the Image provided by the device (in pixels).\n    CV_CAP_PROP_XI_LIMIT_BANDWIDTH                              = 459, // Set/get bandwidth(datarate)(in Megabits)\n    CV_CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH                        = 460, // Sensor output data bit depth.\n    CV_CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH                        = 461, // Device output data bit depth.\n    CV_CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH                         = 462, // bitdepth of data returned by function xiGetImage\n    CV_CAP_PROP_XI_OUTPUT_DATA_PACKING                          = 463, // Device output data packing (or grouping) enabled. Packing could be enabled if output_data_bit_depth > 8 and packing capability is available.\n    CV_CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE                     = 464, // Data packing type. 
Some cameras supports only specific packing type.\n    CV_CAP_PROP_XI_IS_COOLED                                    = 465, // Returns 1 for cameras that support cooling.\n    CV_CAP_PROP_XI_COOLING                                      = 466, // Start camera cooling.\n    CV_CAP_PROP_XI_TARGET_TEMP                                  = 467, // Set sensor target temperature for cooling.\n    CV_CAP_PROP_XI_CHIP_TEMP                                    = 468, // Camera sensor temperature\n    CV_CAP_PROP_XI_HOUS_TEMP                                    = 469, // Camera housing tepmerature\n    CV_CAP_PROP_XI_CMS                                          = 470, // Mode of color management system.\n    CV_CAP_PROP_XI_APPLY_CMS                                    = 471, // Enable applying of CMS profiles to xiGetImage (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE).\n    CV_CAP_PROP_XI_IMAGE_IS_COLOR                               = 474, // Returns 1 for color cameras.\n    CV_CAP_PROP_XI_COLOR_FILTER_ARRAY                           = 475, // Returns color filter array type of RAW data.\n    CV_CAP_PROP_XI_GAMMAY                                       = 476, // Luminosity gamma\n    CV_CAP_PROP_XI_GAMMAC                                       = 477, // Chromaticity gamma\n    CV_CAP_PROP_XI_SHARPNESS                                    = 478, // Sharpness Strenght\n    CV_CAP_PROP_XI_CC_MATRIX_00                                 = 479, // Color Correction Matrix element [0][0]\n    CV_CAP_PROP_XI_CC_MATRIX_01                                 = 480, // Color Correction Matrix element [0][1]\n    CV_CAP_PROP_XI_CC_MATRIX_02                                 = 481, // Color Correction Matrix element [0][2]\n    CV_CAP_PROP_XI_CC_MATRIX_03                                 = 482, // Color Correction Matrix element [0][3]\n    CV_CAP_PROP_XI_CC_MATRIX_10                                 = 483, // Color Correction Matrix element [1][0]\n    CV_CAP_PROP_XI_CC_MATRIX_11                                 = 484, // Color Correction Matrix element [1][1]\n    CV_CAP_PROP_XI_CC_MATRIX_12                                 = 485, // Color Correction Matrix element [1][2]\n    CV_CAP_PROP_XI_CC_MATRIX_13                                 = 486, // Color Correction Matrix element [1][3]\n    CV_CAP_PROP_XI_CC_MATRIX_20                                 = 487, // Color Correction Matrix element [2][0]\n    CV_CAP_PROP_XI_CC_MATRIX_21                                 = 488, // Color Correction Matrix element [2][1]\n    CV_CAP_PROP_XI_CC_MATRIX_22                                 = 489, // Color Correction Matrix element [2][2]\n    CV_CAP_PROP_XI_CC_MATRIX_23                                 = 490, // Color Correction Matrix element [2][3]\n    CV_CAP_PROP_XI_CC_MATRIX_30                                 = 491, // Color Correction Matrix element [3][0]\n    CV_CAP_PROP_XI_CC_MATRIX_31                                 = 492, // Color Correction Matrix element [3][1]\n    CV_CAP_PROP_XI_CC_MATRIX_32                                 = 493, // Color Correction Matrix element [3][2]\n    CV_CAP_PROP_XI_CC_MATRIX_33                                 = 494, // Color Correction Matrix element [3][3]\n    CV_CAP_PROP_XI_DEFAULT_CC_MATRIX                            = 495, // Set default Color Correction Matrix\n    CV_CAP_PROP_XI_TRG_SELECTOR                                 = 498, // Selects the type of trigger.\n    CV_CAP_PROP_XI_ACQ_FRAME_BURST_COUNT                        = 499, // Sets number of frames acquired by burst. 
This burst is used only if trigger is set to FrameBurstStart\n    CV_CAP_PROP_XI_DEBOUNCE_EN                                  = 507, // Enable/Disable debounce to selected GPI\n    CV_CAP_PROP_XI_DEBOUNCE_T0                                  = 508, // Debounce time (x * 10us)\n    CV_CAP_PROP_XI_DEBOUNCE_T1                                  = 509, // Debounce time (x * 10us)\n    CV_CAP_PROP_XI_DEBOUNCE_POL                                 = 510, // Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge)\n    CV_CAP_PROP_XI_LENS_MODE                                    = 511, // Status of lens control interface. This shall be set to XI_ON before any Lens operations.\n    CV_CAP_PROP_XI_LENS_APERTURE_VALUE                          = 512, // Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11\n    CV_CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE                    = 513, // Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps.\n    CV_CAP_PROP_XI_LENS_FOCUS_MOVE                              = 514, // Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE.\n    CV_CAP_PROP_XI_LENS_FOCUS_DISTANCE                          = 515, // Lens focus distance in cm.\n    CV_CAP_PROP_XI_LENS_FOCAL_LENGTH                            = 516, // Lens focal distance in mm.\n    CV_CAP_PROP_XI_LENS_FEATURE_SELECTOR                        = 517, // Selects the current feature which is accessible by XI_PRM_LENS_FEATURE.\n    CV_CAP_PROP_XI_LENS_FEATURE                                 = 518, // Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR.\n    CV_CAP_PROP_XI_DEVICE_MODEL_ID                              = 521, // Return device model id\n    CV_CAP_PROP_XI_DEVICE_SN                                    = 522, // Return device serial number\n    CV_CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA                = 529, // The alpha channel of RGB32 output image format.\n    CV_CAP_PROP_XI_IMAGE_PAYLOAD_SIZE                           = 530, // Buffer size in bytes sufficient for output image returned by xiGetImage\n    CV_CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT                       = 531, // Current format of pixels on transport layer.\n    CV_CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ                         = 532, // Sensor clock frequency in Hz.\n    CV_CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX                      = 533, // Sensor clock frequency index. 
Sensors that support only a set of predefined frequencies can set the frequency only by this index.\n    CV_CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT                  = 534, // Number of output channels from sensor used for data transfer.\n    CV_CAP_PROP_XI_FRAMERATE                                    = 535, // Define framerate in Hz\n    CV_CAP_PROP_XI_COUNTER_SELECTOR                             = 536, // Select counter\n    CV_CAP_PROP_XI_COUNTER_VALUE                                = 537, // Counter status\n    CV_CAP_PROP_XI_ACQ_TIMING_MODE                              = 538, // Type of sensor frames timing.\n    CV_CAP_PROP_XI_AVAILABLE_BANDWIDTH                          = 539, // Calculate and return available interface bandwidth (in Megabits)\n    CV_CAP_PROP_XI_BUFFER_POLICY                                = 540, // Data move policy\n    CV_CAP_PROP_XI_LUT_EN                                       = 541, // Activates LUT.\n    CV_CAP_PROP_XI_LUT_INDEX                                    = 542, // Control the index (offset) of the coefficient to access in the LUT.\n    CV_CAP_PROP_XI_LUT_VALUE                                    = 543, // Value at entry LUTIndex of the LUT\n    CV_CAP_PROP_XI_TRG_DELAY                                    = 544, // Specifies the delay in microseconds (us) to apply after the trigger reception before activating it.\n    CV_CAP_PROP_XI_TS_RST_MODE                                  = 545, // Defines how time stamp reset engine will be armed\n    CV_CAP_PROP_XI_TS_RST_SOURCE                                = 546, // Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming)\n    CV_CAP_PROP_XI_IS_DEVICE_EXIST                              = 547, // Returns 1 if the camera is connected and working properly.\n    CV_CAP_PROP_XI_ACQ_BUFFER_SIZE                              = 548, // Acquisition buffer size in buffer_size_unit. Default bytes.\n    CV_CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT                         = 549, // Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes\n    CV_CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE                    = 550, // Acquisition transport buffer size in bytes\n    CV_CAP_PROP_XI_BUFFERS_QUEUE_SIZE                           = 551, // Queue of field/frame buffers\n    CV_CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT                  = 552, // Number of buffers to commit to low level\n    CV_CAP_PROP_XI_RECENT_FRAME                                 = 553, // GetImage returns most recent frame\n    CV_CAP_PROP_XI_DEVICE_RESET                                 = 554, // Resets the camera to default state.\n    CV_CAP_PROP_XI_COLUMN_FPN_CORRECTION                        = 555, // Correction of column FPN\n    CV_CAP_PROP_XI_SENSOR_MODE                                  = 558, // Current sensor mode. Allows selecting the sensor mode with a single integer. 
Setting of this parameter affects: image dimensions and downsampling.\n    CV_CAP_PROP_XI_HDR                                          = 559, // Enable High Dynamic Range feature.\n    CV_CAP_PROP_XI_HDR_KNEEPOINT_COUNT                          = 560, // The number of kneepoints in the PWLR.\n    CV_CAP_PROP_XI_HDR_T1                                       = 561, // position of first kneepoint(in % of XI_PRM_EXPOSURE)\n    CV_CAP_PROP_XI_HDR_T2                                       = 562, // position of second kneepoint (in % of XI_PRM_EXPOSURE)\n    CV_CAP_PROP_XI_KNEEPOINT1                                   = 563, // value of first kneepoint (% of sensor saturation)\n    CV_CAP_PROP_XI_KNEEPOINT2                                   = 564, // value of second kneepoint (% of sensor saturation)\n    CV_CAP_PROP_XI_IMAGE_BLACK_LEVEL                            = 565, // Last image black level counts. Can be used for Offline processing to recall it.\n    CV_CAP_PROP_XI_HW_REVISION                                  = 571, // Returns hardware revision number.\n    CV_CAP_PROP_XI_DEBUG_LEVEL                                  = 572, // Set debug level\n    CV_CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION                   = 573, // Automatic bandwidth calculation,\n    CV_CAP_PROP_XI_FREE_FFS_SIZE                                = 581, // Size of free camera FFS.\n    CV_CAP_PROP_XI_USED_FFS_SIZE                                = 582, // Size of used camera FFS.\n    CV_CAP_PROP_XI_FFS_ACCESS_KEY                               = 583, // Setting of key enables file operations on some cameras.\n    CV_CAP_PROP_XI_SENSOR_FEATURE_SELECTOR                      = 585, // Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE.\n    CV_CAP_PROP_XI_SENSOR_FEATURE_VALUE                         = 586, // Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR.\n\n    // Properties for Android cameras\n    CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,\n    CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,\n    CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003,\n    CV_CAP_PROP_ANDROID_ANTIBANDING = 8004,\n    CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,\n    CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,\n    CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,\n    CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,\n    CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,\n    CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,\n\n    // Properties of cameras available through AVFOUNDATION interface\n    CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,\n    CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,\n    CV_CAP_PROP_IOS_DEVICE_FLASH = 9003,\n    CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,\n    CV_CAP_PROP_IOS_DEVICE_TORCH = 9005,\n\n    // Properties of cameras available through Smartek Giganetix Ethernet Vision interface\n    /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */\n    CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,\n    CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,\n    CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,\n    CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,\n    CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,\n    CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006,\n\n    CV_CAP_PROP_INTELPERC_PROFILE_COUNT               = 11001,\n    CV_CAP_PROP_INTELPERC_PROFILE_IDX                 = 11002,\n    CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE  = 11003,\n    CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE      = 11004,\n    CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD  = 11005,\n    CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ 
    = 11006,\n    CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT     = 11007,\n\n    // Intel PerC streams\n    CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,\n    CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,\n    CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR\n};\n\n// Generic camera output modes.\n// Currently, these are supported through the libv4l interface only.\nenum\n{\n    CV_CAP_MODE_BGR  = 0, // BGR24 (default)\n    CV_CAP_MODE_RGB  = 1, // RGB24\n    CV_CAP_MODE_GRAY = 2, // Y8\n    CV_CAP_MODE_YUYV = 3  // YUYV\n};\n\nenum\n{\n    // Data given from depth generator.\n    CV_CAP_OPENNI_DEPTH_MAP                 = 0, // Depth values in mm (CV_16UC1)\n    CV_CAP_OPENNI_POINT_CLOUD_MAP           = 1, // XYZ in meters (CV_32FC3)\n    CV_CAP_OPENNI_DISPARITY_MAP             = 2, // Disparity in pixels (CV_8UC1)\n    CV_CAP_OPENNI_DISPARITY_MAP_32F         = 3, // Disparity in pixels (CV_32FC1)\n    CV_CAP_OPENNI_VALID_DEPTH_MASK          = 4, // CV_8UC1\n\n    // Data given from RGB image generator.\n    CV_CAP_OPENNI_BGR_IMAGE                 = 5,\n    CV_CAP_OPENNI_GRAY_IMAGE                = 6\n};\n\n// Supported output modes of OpenNI image generator\nenum\n{\n    CV_CAP_OPENNI_VGA_30HZ     = 0,\n    CV_CAP_OPENNI_SXGA_15HZ    = 1,\n    CV_CAP_OPENNI_SXGA_30HZ    = 2,\n    CV_CAP_OPENNI_QVGA_30HZ    = 3,\n    CV_CAP_OPENNI_QVGA_60HZ    = 4\n};\n\nenum\n{\n    CV_CAP_INTELPERC_DEPTH_MAP              = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.\n    CV_CAP_INTELPERC_UVDEPTH_MAP            = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.\n    CV_CAP_INTELPERC_IR_MAP                 = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.\n    CV_CAP_INTELPERC_IMAGE                  = 3\n};\n\n// gPhoto2 properties, if propertyId is less than 0 then work on widget with that __additive inversed__ camera setting ID\n// Get IDs by using CAP_PROP_GPHOTO2_WIDGET_ENUMERATE.\n// @see CvCaptureCAM_GPHOTO2 for more info\nenum\n{\n    CV_CAP_PROP_GPHOTO2_PREVIEW           = 17001, // Capture only preview from liveview mode.\n    CV_CAP_PROP_GPHOTO2_WIDGET_ENUMERATE  = 17002, // Readonly, returns (const char *).\n    CV_CAP_PROP_GPHOTO2_RELOAD_CONFIG     = 17003, // Trigger, only by set. Reload camera settings.\n    CV_CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE  = 17004, // Reload all settings on set.\n    CV_CAP_PROP_GPHOTO2_COLLECT_MSGS      = 17005, // Collect messages with details.\n    CV_CAP_PROP_GPHOTO2_FLUSH_MSGS        = 17006, // Readonly, returns (const char *).\n    CV_CAP_PROP_SPEED                     = 17007, // Exposure speed. Can be readonly, depends on camera program.\n    CV_CAP_PROP_APERTURE                  = 17008, // Aperture. 
Can be readonly, depends on camera program.\n    CV_CAP_PROP_EXPOSUREPROGRAM           = 17009, // Camera exposure program.\n    CV_CAP_PROP_VIEWFINDER                = 17010  // Enter liveview mode.\n};\n\n/* retrieve or set capture properties */\nCVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );\nCVAPI(int)    cvSetCaptureProperty( CvCapture* capture, int property_id, double value );\n\n// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY\nCVAPI(int)    cvGetCaptureDomain( CvCapture* capture);\n\n/* \"black box\" video file writer structure */\ntypedef struct CvVideoWriter CvVideoWriter;\n\n#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))\n\nCV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)\n{\n    return CV_FOURCC_MACRO(c1, c2, c3, c4);\n}\n\n#define CV_FOURCC_PROMPT -1  /* Open Codec Selection Dialog (Windows only) */\n#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */\n\n/* initialize video file writer */\nCVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,\n                                           double fps, CvSize frame_size,\n                                           int is_color CV_DEFAULT(1));\n\n/* write frame to video file */\nCVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );\n\n/* close video file writer */\nCVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );\n\n/****************************************************************************************\\\n*                              Obsolete functions/synonyms                               *\n\\****************************************************************************************/\n\n#define cvCaptureFromFile cvCreateFileCapture\n#define cvCaptureFromCAM cvCreateCameraCapture\n#define cvCaptureFromAVI cvCaptureFromFile\n#define cvCreateAVIWriter cvCreateVideoWriter\n#define cvWriteToAVI cvWriteFrame\n\n/** @} videoio_c */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif //__OPENCV_VIDEOIO_H__\n"
  },
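  {
    "path": "docs/examples/videoio_c_writer_sketch.cpp",
    "content": "// Editor's sketch, not part of OpenCV and not referenced by any build file:\n// a minimal example of how the C API declared in videoio_c.h fits together --\n// open a capture, create a writer with a FOURCC code, copy frames across,\n// then release both handles. The output name \"out.avi\" and the frame count\n// are hypothetical; cvCreateCameraCapture/cvQueryFrame are declared earlier\n// in the same header.\n#include <opencv2/videoio/videoio_c.h>\n\nint main(void)\n{\n    CvCapture* capture = cvCreateCameraCapture(0); // CV_CAP_ANY: autodetect backend\n    if (!capture) return -1;\n\n    // Match the writer frame size to what the capture actually delivers.\n    CvSize size = cvSize((int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH),\n                         (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT));\n\n    CvVideoWriter* writer = cvCreateVideoWriter(\"out.avi\", CV_FOURCC('M','J','P','G'),\n                                                30.0, size, 1 /* is_color */);\n    if (!writer) { cvReleaseCapture(&capture); return -1; }\n\n    for (int i = 0; i < 100; ++i)\n    {\n        IplImage* frame = cvQueryFrame(capture); // owned by the capture; do not release\n        if (!frame) break;\n        cvWriteFrame(writer, frame);\n    }\n\n    cvReleaseVideoWriter(&writer); // flushes and closes the container\n    cvReleaseCapture(&capture);\n    return 0;\n}\n"
  },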
  {
    "path": "src/3rdparty/opencv/include/opencv2/videoio.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                          License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOIO_HPP__\n#define __OPENCV_VIDEOIO_HPP__\n\n#include \"opencv2/core.hpp\"\n\n/**\n  @defgroup videoio Media I/O\n  @{\n    @defgroup videoio_c C API\n    @defgroup videoio_ios iOS glue\n    @defgroup videoio_winrt WinRT glue\n  @}\n*/\n\n////////////////////////////////// video io /////////////////////////////////\n\ntypedef struct CvCapture CvCapture;\ntypedef struct CvVideoWriter CvVideoWriter;\n\nnamespace cv\n{\n\n//! @addtogroup videoio\n//! 
@{\n\n// Camera API\nenum { CAP_ANY          = 0,     // autodetect\n       CAP_VFW          = 200,   // platform native\n       CAP_V4L          = 200,\n       CAP_V4L2         = CAP_V4L,\n       CAP_FIREWARE     = 300,   // IEEE 1394 drivers\n       CAP_FIREWIRE     = CAP_FIREWARE,\n       CAP_IEEE1394     = CAP_FIREWARE,\n       CAP_DC1394       = CAP_FIREWARE,\n       CAP_CMU1394      = CAP_FIREWARE,\n       CAP_QT           = 500,   // QuickTime\n       CAP_UNICAP       = 600,   // Unicap drivers\n       CAP_DSHOW        = 700,   // DirectShow (via videoInput)\n       CAP_PVAPI        = 800,   // PvAPI, Prosilica GigE SDK\n       CAP_OPENNI       = 900,   // OpenNI (for Kinect)\n       CAP_OPENNI_ASUS  = 910,   // OpenNI (for Asus Xtion)\n       CAP_ANDROID      = 1000,  // Android - not used\n       CAP_XIAPI        = 1100,  // XIMEA Camera API\n       CAP_AVFOUNDATION = 1200,  // AVFoundation framework for iOS (OS X Lion will have the same API)\n       CAP_GIGANETIX    = 1300,  // Smartek Giganetix GigEVisionSDK\n       CAP_MSMF         = 1400,  // Microsoft Media Foundation (via videoInput)\n       CAP_WINRT        = 1410,  // Microsoft Windows Runtime using Media Foundation\n       CAP_INTELPERC    = 1500,  // Intel Perceptual Computing SDK\n       CAP_OPENNI2      = 1600,  // OpenNI2 (for Kinect)\n       CAP_OPENNI2_ASUS = 1610,  // OpenNI2 (for Asus Xtion and Occipital Structure sensors)\n       CAP_GPHOTO2      = 1700,  // gPhoto2 connection\n       CAP_GSTREAMER    = 1800,  // GStreamer\n       CAP_FFMPEG       = 1900,  // FFMPEG\n       CAP_IMAGES       = 2000   // OpenCV Image Sequence (e.g. img_%02d.jpg)\n     };\n\n// generic properties (based on DC1394 properties)\nenum { CAP_PROP_POS_MSEC       =0,\n       CAP_PROP_POS_FRAMES     =1,\n       CAP_PROP_POS_AVI_RATIO  =2,\n       CAP_PROP_FRAME_WIDTH    =3,\n       CAP_PROP_FRAME_HEIGHT   =4,\n       CAP_PROP_FPS            =5,\n       CAP_PROP_FOURCC         =6,\n       CAP_PROP_FRAME_COUNT    =7,\n       CAP_PROP_FORMAT         =8,\n       CAP_PROP_MODE           =9,\n       CAP_PROP_BRIGHTNESS    =10,\n       CAP_PROP_CONTRAST      =11,\n       CAP_PROP_SATURATION    =12,\n       CAP_PROP_HUE           =13,\n       CAP_PROP_GAIN          =14,\n       CAP_PROP_EXPOSURE      =15,\n       CAP_PROP_CONVERT_RGB   =16,\n       CAP_PROP_WHITE_BALANCE_BLUE_U =17,\n       CAP_PROP_RECTIFICATION =18,\n       CAP_PROP_MONOCHROME    =19,\n       CAP_PROP_SHARPNESS     =20,\n       CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust reference level using this feature\n       CAP_PROP_GAMMA         =22,\n       CAP_PROP_TEMPERATURE   =23,\n       CAP_PROP_TRIGGER       =24,\n       CAP_PROP_TRIGGER_DELAY =25,\n       CAP_PROP_WHITE_BALANCE_RED_V =26,\n       CAP_PROP_ZOOM          =27,\n       CAP_PROP_FOCUS         =28,\n       CAP_PROP_GUID          =29,\n       CAP_PROP_ISO_SPEED     =30,\n       CAP_PROP_BACKLIGHT     =32,\n       CAP_PROP_PAN           =33,\n       CAP_PROP_TILT          =34,\n       CAP_PROP_ROLL          =35,\n       CAP_PROP_IRIS          =36,\n       CAP_PROP_SETTINGS      =37,\n       CAP_PROP_BUFFERSIZE    =38,\n       CAP_PROP_AUTOFOCUS     =39\n     };\n\n\n// Generic camera output modes.\n// Currently, these are supported through the libv4l interface only.\nenum { CAP_MODE_BGR  = 0, // BGR24 (default)\n       CAP_MODE_RGB  = 1, // RGB24\n       CAP_MODE_GRAY = 2, // Y8\n       CAP_MODE_YUYV = 3  // YUYV\n     };\n\n\n// DC1394 only\n// modes of the controlling registers 
(can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)\n// every feature can have only one mode turned on at a time\nenum { CAP_PROP_DC1394_OFF                = -4, //turn the feature off (not controlled manually nor automatically)\n       CAP_PROP_DC1394_MODE_MANUAL        = -3, //set automatically when a value of the feature is set by the user\n       CAP_PROP_DC1394_MODE_AUTO          = -2,\n       CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,\n       CAP_PROP_DC1394_MAX                = 31\n     };\n\n\n// OpenNI map generators\nenum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,\n       CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,\n       CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR\n     };\n\n// Properties of cameras available through OpenNI interfaces\nenum { CAP_PROP_OPENNI_OUTPUT_MODE       = 100,\n       CAP_PROP_OPENNI_FRAME_MAX_DEPTH   = 101, // in mm\n       CAP_PROP_OPENNI_BASELINE          = 102, // in mm\n       CAP_PROP_OPENNI_FOCAL_LENGTH      = 103, // in pixels\n       CAP_PROP_OPENNI_REGISTRATION      = 104, // flag that synchronizes the remapping depth map to image map\n                                                // by changing depth generator's view point (if the flag is \"on\") or\n                                                // sets this view point to its normal one (if the flag is \"off\").\n       CAP_PROP_OPENNI_REGISTRATION_ON   = CAP_PROP_OPENNI_REGISTRATION,\n       CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,\n       CAP_PROP_OPENNI_MAX_BUFFER_SIZE   = 106,\n       CAP_PROP_OPENNI_CIRCLE_BUFFER     = 107,\n       CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,\n       CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,\n       CAP_PROP_OPENNI2_SYNC             = 110,\n       CAP_PROP_OPENNI2_MIRROR           = 111\n     };\n\n// OpenNI shortcuts\nenum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT         = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT,\n       CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE     = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE,\n       CAP_OPENNI_DEPTH_GENERATOR_BASELINE        = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE,\n       CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH    = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH,\n       CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION    = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION,\n       CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION\n     };\n\n// OpenNI data given from depth generator\nenum { CAP_OPENNI_DEPTH_MAP         = 0, // Depth values in mm (CV_16UC1)\n       CAP_OPENNI_POINT_CLOUD_MAP   = 1, // XYZ in meters (CV_32FC3)\n       CAP_OPENNI_DISPARITY_MAP     = 2, // Disparity in pixels (CV_8UC1)\n       CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)\n       CAP_OPENNI_VALID_DEPTH_MASK  = 4, // CV_8UC1\n\n       // Data given from RGB image generator\n       CAP_OPENNI_BGR_IMAGE         = 5,\n       CAP_OPENNI_GRAY_IMAGE        = 6\n     };\n\n// Supported output modes of OpenNI image generator\nenum { CAP_OPENNI_VGA_30HZ  = 0,\n       CAP_OPENNI_SXGA_15HZ = 1,\n       CAP_OPENNI_SXGA_30HZ = 2,\n       CAP_OPENNI_QVGA_30HZ = 3,\n       CAP_OPENNI_QVGA_60HZ = 4\n     };\n\n\n// GStreamer\nenum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1\n     };\n\n\n// PVAPI\nenum { CAP_PROP_PVAPI_MULTICASTIP           = 300, // IP address to enable multicast master mode. 
0 disables multicast.\n       CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated\n       CAP_PROP_PVAPI_DECIMATIONHORIZONTAL  = 302, // Horizontal sub-sampling of the image\n       CAP_PROP_PVAPI_DECIMATIONVERTICAL    = 303, // Vertical sub-sampling of the image\n       CAP_PROP_PVAPI_BINNINGX              = 304, // Horizontal binning factor\n       CAP_PROP_PVAPI_BINNINGY              = 305, // Vertical binning factor\n       CAP_PROP_PVAPI_PIXELFORMAT           = 306  // Pixel format\n     };\n\n// PVAPI: FrameStartTriggerMode\nenum { CAP_PVAPI_FSTRIGMODE_FREERUN     = 0,    // Freerun\n       CAP_PVAPI_FSTRIGMODE_SYNCIN1     = 1,    // SyncIn1\n       CAP_PVAPI_FSTRIGMODE_SYNCIN2     = 2,    // SyncIn2\n       CAP_PVAPI_FSTRIGMODE_FIXEDRATE   = 3,    // FixedRate\n       CAP_PVAPI_FSTRIGMODE_SOFTWARE    = 4     // Software\n     };\n\n// PVAPI: DecimationHorizontal, DecimationVertical\nenum { CAP_PVAPI_DECIMATION_OFF       = 1,    // Off\n       CAP_PVAPI_DECIMATION_2OUTOF4   = 2,    // 2 out of 4 decimation\n       CAP_PVAPI_DECIMATION_2OUTOF8   = 4,    // 2 out of 8 decimation\n       CAP_PVAPI_DECIMATION_2OUTOF16  = 8     // 2 out of 16 decimation\n     };\n\n// PVAPI: PixelFormat\nenum { CAP_PVAPI_PIXELFORMAT_MONO8    = 1,    // Mono8\n       CAP_PVAPI_PIXELFORMAT_MONO16   = 2,    // Mono16\n       CAP_PVAPI_PIXELFORMAT_BAYER8   = 3,    // Bayer8\n       CAP_PVAPI_PIXELFORMAT_BAYER16  = 4,    // Bayer16\n       CAP_PVAPI_PIXELFORMAT_RGB24    = 5,    // Rgb24\n       CAP_PVAPI_PIXELFORMAT_BGR24    = 6,    // Bgr24\n       CAP_PVAPI_PIXELFORMAT_RGBA32   = 7,    // Rgba32\n       CAP_PVAPI_PIXELFORMAT_BGRA32   = 8     // Bgra32\n     };\n\n// Properties of cameras available through XIMEA SDK interface\nenum { CAP_PROP_XI_DOWNSAMPLING  = 400, // Change image resolution by binning or skipping.\n       CAP_PROP_XI_DATA_FORMAT   = 401, // Output data format.\n       CAP_PROP_XI_OFFSET_X      = 402, // Horizontal offset from the origin to the area of interest (in pixels).\n       CAP_PROP_XI_OFFSET_Y      = 403, // Vertical offset from the origin to the area of interest (in pixels).\n       CAP_PROP_XI_TRG_SOURCE    = 404, // Defines source of trigger.\n       CAP_PROP_XI_TRG_SOFTWARE  = 405, // Generates an internal trigger. 
PRM_TRG_SOURCE must be set to TRG_SOFTWARE.\n       CAP_PROP_XI_GPI_SELECTOR  = 406, // Selects general purpose input\n       CAP_PROP_XI_GPI_MODE      = 407, // Set general purpose input mode\n       CAP_PROP_XI_GPI_LEVEL     = 408, // Get general purpose level\n       CAP_PROP_XI_GPO_SELECTOR  = 409, // Selects general purpose output\n       CAP_PROP_XI_GPO_MODE      = 410, // Set general purpose output mode\n       CAP_PROP_XI_LED_SELECTOR  = 411, // Selects camera signalling LED\n       CAP_PROP_XI_LED_MODE      = 412, // Define camera signalling LED functionality\n       CAP_PROP_XI_MANUAL_WB     = 413, // Calculates White Balance(must be called during acquisition)\n       CAP_PROP_XI_AUTO_WB       = 414, // Automatic white balance\n       CAP_PROP_XI_AEAG          = 415, // Automatic exposure/gain\n       CAP_PROP_XI_EXP_PRIORITY  = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).\n       CAP_PROP_XI_AE_MAX_LIMIT  = 417, // Maximum limit of exposure in AEAG procedure\n       CAP_PROP_XI_AG_MAX_LIMIT  = 418, // Maximum limit of gain in AEAG procedure\n       CAP_PROP_XI_AEAG_LEVEL    = 419, // Average intensity of output signal AEAG should achieve(in %)\n       CAP_PROP_XI_TIMEOUT       = 420  // Image capture timeout in milliseconds\n     };\n\n// Properties of cameras available through AVFOUNDATION interface\nenum { CAP_PROP_IOS_DEVICE_FOCUS        = 9001,\n       CAP_PROP_IOS_DEVICE_EXPOSURE     = 9002,\n       CAP_PROP_IOS_DEVICE_FLASH        = 9003,\n       CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,\n       CAP_PROP_IOS_DEVICE_TORCH        = 9005\n     };\n\n\n// Properties of cameras available through Smartek Giganetix Ethernet Vision interface\n/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */\nenum { CAP_PROP_GIGA_FRAME_OFFSET_X   = 10001,\n       CAP_PROP_GIGA_FRAME_OFFSET_Y   = 10002,\n       CAP_PROP_GIGA_FRAME_WIDTH_MAX  = 10003,\n       CAP_PROP_GIGA_FRAME_HEIGH_MAX  = 10004,\n       CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,\n       CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006\n     };\n\nenum { CAP_PROP_INTELPERC_PROFILE_COUNT               = 11001,\n       CAP_PROP_INTELPERC_PROFILE_IDX                 = 11002,\n       CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE  = 11003,\n       CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE      = 11004,\n       CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD  = 11005,\n       CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ     = 11006,\n       CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT     = 11007\n     };\n\n// Intel PerC streams\nenum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,\n       CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,\n       CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR\n     };\n\nenum { CAP_INTELPERC_DEPTH_MAP              = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.\n       CAP_INTELPERC_UVDEPTH_MAP            = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.\n       CAP_INTELPERC_IR_MAP                 = 2, // Each pixel is a 16-bit integer. 
The value indicates the intensity of the reflected laser beam.\n       CAP_INTELPERC_IMAGE                  = 3\n     };\n\nenum { VIDEOWRITER_PROP_QUALITY = 1,    // Quality (0..100%) of the videostream encoded\n       VIDEOWRITER_PROP_FRAMEBYTES = 2, // (Read-only): Size of just encoded video frame\n       VIDEOWRITER_PROP_NSTRIPES = 3    // Number of stripes for parallel encoding. -1 for auto detection\n     };\n\n// gPhoto2 properties, if propertyId is less than 0 then work on widget with that __additive inversed__ camera setting ID\n// Get IDs by using CAP_PROP_GPHOTO2_WIDGET_ENUMERATE.\n// @see CvCaptureCAM_GPHOTO2 for more info\nenum { CAP_PROP_GPHOTO2_PREVIEW           = 17001, // Capture only preview from liveview mode.\n       CAP_PROP_GPHOTO2_WIDGET_ENUMERATE  = 17002, // Readonly, returns (const char *).\n       CAP_PROP_GPHOTO2_RELOAD_CONFIG     = 17003, // Trigger, only by set. Reload camera settings.\n       CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE  = 17004, // Reload all settings on set.\n       CAP_PROP_GPHOTO2_COLLECT_MSGS      = 17005, // Collect messages with details.\n       CAP_PROP_GPHOTO2_FLUSH_MSGS        = 17006, // Readonly, returns (const char *).\n       CAP_PROP_SPEED                     = 17007, // Exposure speed. Can be readonly, depends on camera program.\n       CAP_PROP_APERTURE                  = 17008, // Aperture. Can be readonly, depends on camera program.\n       CAP_PROP_EXPOSUREPROGRAM           = 17009, // Camera exposure program.\n       CAP_PROP_VIEWFINDER                = 17010  // Enter liveview mode.\n     };\n\n//enum {\n\nclass IVideoCapture;\n\n/** @brief Class for video capturing from video files, image sequences or cameras. The class provides C++ API\nfor capturing video from cameras or for reading video files and image sequences. 
Here is how the\nclass can be used:\n@code\n    #include \"opencv2/opencv.hpp\"\n\n    using namespace cv;\n\n    int main(int, char**)\n    {\n        VideoCapture cap(0); // open the default camera\n        if(!cap.isOpened())  // check if we succeeded\n            return -1;\n\n        Mat edges;\n        namedWindow(\"edges\",1);\n        for(;;)\n        {\n            Mat frame;\n            cap >> frame; // get a new frame from camera\n            cvtColor(frame, edges, COLOR_BGR2GRAY);\n            GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);\n            Canny(edges, edges, 0, 30, 3);\n            imshow(\"edges\", edges);\n            if(waitKey(30) >= 0) break;\n        }\n        // the camera will be deinitialized automatically in VideoCapture destructor\n        return 0;\n    }\n@endcode\n@note In C API the black-box structure CvCapture is used instead of VideoCapture.\n\n@note\n-   A basic sample on using the VideoCapture interface can be found at\n    opencv_source_code/samples/cpp/starter_video.cpp\n-   Another basic video processing sample can be found at\n    opencv_source_code/samples/cpp/video_dmtx.cpp\n-   (Python) A basic sample on using the VideoCapture interface can be found at\n    opencv_source_code/samples/python/video.py\n-   (Python) Another basic video processing sample can be found at\n    opencv_source_code/samples/python/video_dmtx.py\n-   (Python) A multi threaded video processing sample can be found at\n    opencv_source_code/samples/python/video_threaded.py\n */\nclass CV_EXPORTS_W VideoCapture\n{\npublic:\n    /** @brief\n    @note In C API, when you have finished working with video, release the CvCapture structure with\n    cvReleaseCapture(), or use Ptr\\<CvCapture\\> that calls cvReleaseCapture() automatically in the\n    destructor.\n     */\n    CV_WRAP VideoCapture();\n\n    /** @overload\n    @param filename name of the opened video file (eg. video.avi) or image sequence (eg.\n    img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)\n    */\n    CV_WRAP VideoCapture(const String& filename);\n\n    /** @overload\n    @param filename name of the opened video file (eg. video.avi) or image sequence (eg.\n    img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)\n\n    @param apiPreference preferred Capture API to use. Can be used to enforce a specific reader\n    implementation if multiple are available: e.g. CAP_FFMPEG or CAP_IMAGES\n    */\n    CV_WRAP VideoCapture(const String& filename, int apiPreference);\n\n    /** @overload\n    @param index = camera_id + domain_offset (CAP_*). id of the video capturing device to open. If there is a single\n    camera connected, just pass 0. Advanced Usage: to open Camera 1 using the MS Media Foundation API: index = 1 + CAP_MSMF\n    */\n    CV_WRAP VideoCapture(int index);\n\n    virtual ~VideoCapture();\n\n    /** @brief Open a video file or a capturing device for video capturing\n\n    @param filename name of the opened video file (eg. video.avi) or image sequence (eg.\n    img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)\n\n    The methods first call VideoCapture::release to close the already opened file or camera.\n     */\n    CV_WRAP virtual bool open(const String& filename);\n\n    /** @overload\n    @param index = camera_id + domain_offset (CAP_*). id of the video capturing device to open. If there is a single\n    camera connected, just pass 0. 
Advanced Usage: to open Camera 1 using the MS Media Foundation API: index = 1 + CAP_MSMF\n    */\n    CV_WRAP virtual bool open(int index);\n\n    /** @brief Returns true if video capturing has been initialized already.\n\n    If the previous call to VideoCapture constructor or VideoCapture::open succeeded, the method returns\n    true.\n     */\n    CV_WRAP virtual bool isOpened() const;\n\n    /** @brief Closes video file or capturing device.\n\n    The methods are automatically called by subsequent VideoCapture::open and by VideoCapture\n    destructor.\n\n    The C function also deallocates memory and clears \\*capture pointer.\n     */\n    CV_WRAP virtual void release();\n\n    /** @brief Grabs the next frame from video file or capturing device.\n\n    The methods/functions grab the next frame from video file or camera and return true (non-zero) in\n    the case of success.\n\n    The primary use of the function is in multi-camera environments, especially when the cameras do not\n    have hardware synchronization. That is, you call VideoCapture::grab() for each camera and after that\n    call the slower method VideoCapture::retrieve() to decode and get a frame from each camera. This way\n    the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames\n    from different cameras will be closer in time.\n\n    Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the\n    correct way of retrieving data from it is to call VideoCapture::grab first and then call\n    VideoCapture::retrieve one or more times with different values of the channel parameter. See\n    <https://github.com/Itseez/opencv/tree/master/samples/cpp/openni_capture.cpp>\n     */\n    CV_WRAP virtual bool grab();\n\n    /** @brief Decodes and returns the grabbed video frame.\n\n    The methods/functions decode and return the just grabbed frame. If no frames have been grabbed\n    (camera has been disconnected, or there are no more frames in video file), the methods return false\n    and the functions return NULL pointer.\n\n    @note OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame return image stored inside the video\n    capturing structure. It is not allowed to modify or release the image! You can copy the frame using\n    cvCloneImage and then do whatever you want with the copy.\n     */\n    CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0);\n    virtual VideoCapture& operator >> (CV_OUT Mat& image);\n    virtual VideoCapture& operator >> (CV_OUT UMat& image);\n\n    /** @brief Grabs, decodes and returns the next video frame.\n\n    The methods/functions combine VideoCapture::grab and VideoCapture::retrieve in one call. This is the\n    most convenient method for reading video files or capturing data from cameras: it decodes and returns the just\n    grabbed frame. If no frames have been grabbed (camera has been disconnected, or there are no more\n    frames in video file), the methods return false and the functions return NULL pointer.\n\n    @note OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame return image stored inside the video\n    capturing structure. It is not allowed to modify or release the image! You can copy the frame using\n    cvCloneImage and then do whatever you want with the copy.\n     */\n    CV_WRAP virtual bool read(OutputArray image);\n\n    /** @brief Sets a property in the VideoCapture.\n\n    @param propId Property identifier. 
It can be one of the following:\n     -   **CAP_PROP_POS_MSEC** Current position of the video file in milliseconds.\n     -   **CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.\n     -   **CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the\n         film, 1 - end of the film.\n     -   **CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.\n     -   **CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.\n     -   **CAP_PROP_FPS** Frame rate.\n     -   **CAP_PROP_FOURCC** 4-character code of codec.\n     -   **CAP_PROP_FRAME_COUNT** Number of frames in the video file.\n     -   **CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .\n     -   **CAP_PROP_MODE** Backend-specific value indicating the current capture mode.\n     -   **CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).\n     -   **CAP_PROP_CONTRAST** Contrast of the image (only for cameras).\n     -   **CAP_PROP_SATURATION** Saturation of the image (only for cameras).\n     -   **CAP_PROP_HUE** Hue of the image (only for cameras).\n     -   **CAP_PROP_GAIN** Gain of the image (only for cameras).\n     -   **CAP_PROP_EXPOSURE** Exposure (only for cameras).\n     -   **CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted\n         to RGB.\n     -   **CAP_PROP_WHITE_BALANCE** Currently unsupported\n     -   **CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported\n         by DC1394 v 2.x backend currently)\n    @param value Value of the property.\n     */\n    CV_WRAP virtual bool set(int propId, double value);\n\n    /** @brief Returns the specified VideoCapture property\n\n    @param propId Property identifier. It can be one of the following:\n     -   **CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video\n         capture timestamp.\n     -   **CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.\n     -   **CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the\n         film, 1 - end of the film.\n     -   **CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.\n     -   **CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.\n     -   **CAP_PROP_FPS** Frame rate.\n     -   **CAP_PROP_FOURCC** 4-character code of codec.\n     -   **CAP_PROP_FRAME_COUNT** Number of frames in the video file.\n     -   **CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .\n     -   **CAP_PROP_MODE** Backend-specific value indicating the current capture mode.\n     -   **CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).\n     -   **CAP_PROP_CONTRAST** Contrast of the image (only for cameras).\n     -   **CAP_PROP_SATURATION** Saturation of the image (only for cameras).\n     -   **CAP_PROP_HUE** Hue of the image (only for cameras).\n     -   **CAP_PROP_GAIN** Gain of the image (only for cameras).\n     -   **CAP_PROP_EXPOSURE** Exposure (only for cameras).\n     -   **CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted\n         to RGB.\n     -   **CAP_PROP_WHITE_BALANCE** Currently not supported\n     -   **CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported\n         by DC1394 v 2.x backend currently)\n\n    @note When querying a property that is not supported by the backend used by the VideoCapture\n    class, value 0 is returned.\n     */\n    CV_WRAP virtual double get(int propId) 
const;\n\n    /** @overload\n\n    @param filename name of the opened video file (eg. video.avi) or image sequence (eg.\n    img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)\n\n    @param apiPreference preferred Capture API to use. Can be used to enforce a specific reader\n    implementation if multiple are available: e.g. CAP_FFMPEG or CAP_IMAGES\n\n    The methods first call VideoCapture::release to close the already opened file or camera.\n     */\n    CV_WRAP virtual bool open(const String& filename, int apiPreference);\n\nprotected:\n    Ptr<CvCapture> cap;\n    Ptr<IVideoCapture> icap;\n};\n\nclass IVideoWriter;\n\n/** @brief Video writer class.\n */\nclass CV_EXPORTS_W VideoWriter\n{\npublic:\n    /** @brief VideoWriter constructors\n\n    The constructors/functions initialize video writers. On Linux FFMPEG is used to write videos; on\n    Windows FFMPEG or VFW is used; on MacOSX QTKit is used.\n     */\n    CV_WRAP VideoWriter();\n\n    /** @overload\n    @param filename Name of the output video file.\n    @param fourcc 4-character code of codec used to compress the frames. For example,\n    VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec, VideoWriter::fourcc('M','J','P','G') is a\n    motion-jpeg codec etc. List of codes can be obtained at [Video Codecs by\n    FOURCC](http://www.fourcc.org/codecs.php) page. FFMPEG backend with MP4 container natively uses\n    other values as fourcc code: see [ObjectType](http://www.mp4ra.org/codecs.html),\n    so you may receive a warning message from OpenCV about fourcc code conversion.\n    @param fps Framerate of the created video stream.\n    @param frameSize Size of the video frames.\n    @param isColor If it is not zero, the encoder will expect and encode color frames, otherwise it\n    will work with grayscale frames (the flag is currently supported on Windows only).\n    */\n    CV_WRAP VideoWriter(const String& filename, int fourcc, double fps,\n                Size frameSize, bool isColor = true);\n\n    virtual ~VideoWriter();\n\n    /** @brief Initializes or reinitializes video writer.\n\n    The method opens video writer. Parameters are the same as in the constructor\n    VideoWriter::VideoWriter.\n     */\n    CV_WRAP virtual bool open(const String& filename, int fourcc, double fps,\n                      Size frameSize, bool isColor = true);\n\n    /** @brief Returns true if video writer has been successfully initialized.\n    */\n    CV_WRAP virtual bool isOpened() const;\n\n    /** @brief Closes the video writer.\n\n    The methods are automatically called by subsequent VideoWriter::open and by the VideoWriter\n    destructor.\n     */\n    CV_WRAP virtual void release();\n    virtual VideoWriter& operator << (const Mat& image);\n\n    /** @brief Writes the next video frame\n\n    @param image The written frame\n\n    The functions/methods write the specified image to video file. It must have the same size as has\n    been specified when opening the video writer.\n     */\n    CV_WRAP virtual void write(const Mat& image);\n\n    /** @brief Sets a property in the VideoWriter.\n\n     @param propId Property identifier. It can be one of the following:\n     -   **VIDEOWRITER_PROP_QUALITY** Quality (0..100%) of the videostream encoded. 
Can be adjusted dynamically in some codecs.\n     -   **VIDEOWRITER_PROP_NSTRIPES** Number of stripes for parallel encoding\n     @param value Value of the property.\n     */\n    CV_WRAP virtual bool set(int propId, double value);\n\n    /** @brief Returns the specified VideoWriter property\n\n     @param propId Property identifier. It can be one of the following:\n     -   **VIDEOWRITER_PROP_QUALITY** Current quality of the encoded videostream.\n     -   **VIDEOWRITER_PROP_FRAMEBYTES** (Read-only) Size of just encoded video frame; note that the encoding order may be different from representation order.\n     -   **VIDEOWRITER_PROP_NSTRIPES** Number of stripes for parallel encoding\n\n     @note When querying a property that is not supported by the backend used by the VideoWriter\n     class, value 0 is returned.\n     */\n    CV_WRAP virtual double get(int propId) const;\n\n    /** @brief Concatenates 4 chars to a fourcc code\n\n    This static method constructs the fourcc code of the codec to be used in the constructor\n    VideoWriter::VideoWriter or VideoWriter::open.\n     */\n    CV_WRAP static int fourcc(char c1, char c2, char c3, char c4);\n\nprotected:\n    Ptr<CvVideoWriter> writer;\n    Ptr<IVideoWriter> iwriter;\n\n    static Ptr<IVideoWriter> create(const String& filename, int fourcc, double fps,\n                                    Size frameSize, bool isColor = true);\n};\n\ntemplate<> CV_EXPORTS void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const;\ntemplate<> CV_EXPORTS void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const;\n\n//! @} videoio\n\n} // cv\n\n#endif //__OPENCV_VIDEOIO_HPP__\n"
  },
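  {
    "path": "docs/examples/videoio_grab_retrieve_sketch.cpp",
    "content": "// Editor's sketch, not part of OpenCV and not referenced by any build file.\n// It exercises the API declared in videoio.hpp: the grab()/retrieve() split\n// (grab both cameras back to back so the decoded frames are close in time),\n// best-effort property requests, and a VideoWriter opened with\n// VideoWriter::fourcc(). Camera indices, sizes and \"pair.avi\" are hypothetical.\n#include <opencv2/core.hpp>\n#include <opencv2/videoio.hpp>\n\nusing namespace cv;\n\nint main()\n{\n    VideoCapture cam0(0), cam1(1);\n    if (!cam0.isOpened() || !cam1.isOpened()) return -1;\n\n    // set() is best-effort: a backend may ignore or round the request.\n    cam0.set(CAP_PROP_FRAME_WIDTH, 640);  cam0.set(CAP_PROP_FRAME_HEIGHT, 480);\n    cam1.set(CAP_PROP_FRAME_WIDTH, 640);  cam1.set(CAP_PROP_FRAME_HEIGHT, 480);\n\n    VideoWriter writer(\"pair.avi\", VideoWriter::fourcc('M','J','P','G'),\n                       25.0, Size(1280, 480));\n    if (!writer.isOpened()) return -1;\n    writer.set(VIDEOWRITER_PROP_QUALITY, 90); // honored only by some backends\n\n    Mat f0, f1, row;\n    for (int i = 0; i < 250; ++i)\n    {\n        // Cheap grab() on both devices first, slow decode afterwards.\n        if (!cam0.grab() || !cam1.grab()) break;\n        cam0.retrieve(f0);\n        cam1.retrieve(f1);\n        hconcat(f0, f1, row); // assumes both frames really are 640x480\n        writer.write(row);\n    }\n    return 0; // destructors release the cameras and finalize the file\n}\n"
  },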
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/deblurring.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_DEBLURRING_HPP__\n#define __OPENCV_VIDEOSTAB_DEBLURRING_HPP__\n\n#include <vector>\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! 
@{\n\nCV_EXPORTS float calcBlurriness(const Mat &frame);\n\nclass CV_EXPORTS DeblurerBase\n{\npublic:\n    DeblurerBase() : radius_(0), frames_(0), motions_(0), blurrinessRates_(0) {}\n\n    virtual ~DeblurerBase() {}\n\n    virtual void setRadius(int val) { radius_ = val; }\n    virtual int radius() const { return radius_; }\n\n    virtual void deblur(int idx, Mat &frame) = 0;\n\n\n    // data from stabilizer\n\n    virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }\n    virtual const std::vector<Mat>& frames() const { return *frames_; }\n\n    virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }\n    virtual const std::vector<Mat>& motions() const { return *motions_; }\n\n    virtual void setBlurrinessRates(const std::vector<float> &val) { blurrinessRates_ = &val; }\n    virtual const std::vector<float>& blurrinessRates() const { return *blurrinessRates_; }\n\nprotected:\n    int radius_;\n    const std::vector<Mat> *frames_;\n    const std::vector<Mat> *motions_;\n    const std::vector<float> *blurrinessRates_;\n};\n\nclass CV_EXPORTS NullDeblurer : public DeblurerBase\n{\npublic:\n    virtual void deblur(int /*idx*/, Mat &/*frame*/) {}\n};\n\nclass CV_EXPORTS WeightingDeblurer : public DeblurerBase\n{\npublic:\n    WeightingDeblurer();\n\n    void setSensitivity(float val) { sensitivity_ = val; }\n    float sensitivity() const { return sensitivity_; }\n\n    virtual void deblur(int idx, Mat &frame);\n\nprivate:\n    float sensitivity_;\n    Mat_<float> bSum_, gSum_, rSum_, wSum_;\n};\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
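  {
    "path": "docs/examples/videostab_deblur_sketch.cpp",
    "content": "// Editor's sketch, not part of OpenCV and not referenced by any build file.\n// It shows the DeblurerBase contract from deblurring.hpp: a stabilizer\n// normally supplies the frame window, per-frame motions and blurriness\n// rates, then asks the deblurer to rewrite one frame at a time. The frame\n// sizes are arbitrary and identity motions are used only to satisfy the\n// interface; real motions come from a motion estimator.\n#include <vector>\n#include <opencv2/core.hpp>\n#include <opencv2/videostab/deblurring.hpp>\n\nusing namespace cv;\nusing namespace cv::videostab;\n\nint main()\n{\n    std::vector<Mat> frames(5), motions(5);\n    std::vector<float> rates(5);\n    for (size_t i = 0; i < frames.size(); ++i)\n    {\n        frames[i] = Mat::zeros(120, 160, CV_8UC3); // stand-in color frames\n        motions[i] = Mat::eye(3, 3, CV_32F);       // identity inter-frame motion\n        rates[i] = calcBlurriness(frames[i]);      // declared in this header\n    }\n\n    WeightingDeblurer deblurer;\n    deblurer.setRadius(2);          // use two neighbours on each side\n    deblurer.setSensitivity(0.1f);\n    deblurer.setFrames(frames);     // the deblurer keeps pointers, so the\n    deblurer.setMotions(motions);   // vectors must outlive it\n    deblurer.setBlurrinessRates(rates);\n\n    Mat middle = frames[2].clone();\n    deblurer.deblur(2, middle);     // sharpen frame 2 in place\n    return 0;\n}\n"
  },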
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/fast_marching.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__\n#define __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__\n\n#include <cmath>\n#include <queue>\n#include <algorithm>\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab_marching\n//! @{\n\n/** @brief Describes the Fast Marching Method implementation.\n\n  See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf\n */\nclass CV_EXPORTS FastMarchingMethod\n{\npublic:\n    FastMarchingMethod() : inf_(1e6f) {}\n\n    /** @brief Template method that runs the Fast Marching Method.\n\n    @param mask Image mask. 
A 0 value indicates that the pixel must be inpainted, 255 indicates\n    that the pixel value is known; other values aren't acceptable.\n    @param inpaint Inpainting functor that overloads void operator ()(int x, int y).\n    @return Inpainting functor.\n     */\n    template <typename Inpaint>\n    Inpaint run(const Mat &mask, Inpaint inpaint);\n\n    /**\n    @return Distance map that is built while the method runs.\n    */\n    Mat distanceMap() const { return dist_; }\n\nprivate:\n    enum { INSIDE = 0, BAND = 1, KNOWN = 255 };\n\n    struct DXY\n    {\n        float dist;\n        int x, y;\n\n        DXY() : dist(0), x(0), y(0) {}\n        DXY(float _dist, int _x, int _y) : dist(_dist), x(_x), y(_y) {}\n        bool operator <(const DXY &dxy) const { return dist < dxy.dist; }\n    };\n\n    float solve(int x1, int y1, int x2, int y2) const;\n    int& indexOf(const DXY &dxy) { return index_(dxy.y, dxy.x); }\n\n    void heapUp(int idx);\n    void heapDown(int idx);\n    void heapAdd(const DXY &dxy);\n    void heapRemoveMin();\n\n    float inf_;\n\n    cv::Mat_<uchar> flag_; // flag map\n    cv::Mat_<float> dist_; // distance map\n\n    cv::Mat_<int> index_; // index of point in the narrow band\n    std::vector<DXY> narrowBand_; // narrow band heap\n    int size_; // narrow band size\n};\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#include \"fast_marching_inl.hpp\"\n\n#endif\n
  },
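  {
    "path": "docs/examples/fast_marching_sketch.cpp",
    "content": "// Editor's sketch, not part of OpenCV and not referenced by any build file.\n// FastMarchingMethod (fast_marching.hpp) propagates a front from the KNOWN\n// (255) region of the mask into the unknown (0) region in distance order and\n// calls the supplied functor once per reached pixel -- that is how the\n// inpainters in this module receive pixels. The functor below just counts\n// the visits; the mask shape and size are arbitrary.\n#include <cstdio>\n#include <opencv2/core.hpp>\n#include <opencv2/videostab/fast_marching.hpp>\n\nusing namespace cv;\nusing namespace cv::videostab;\n\n// Functor invoked by run() as the marching front reaches each pixel.\nstruct VisitCounter\n{\n    int visited;\n    VisitCounter() : visited(0) {}\n    void operator()(int /*x*/, int /*y*/) { ++visited; }\n};\n\nint main()\n{\n    // 0 = to be filled, 255 = known; here the left half is known.\n    Mat_<uchar> mask(64, 64, (uchar)0);\n    mask.colRange(0, 32).setTo(255);\n\n    FastMarchingMethod fmm;\n    VisitCounter counter = fmm.run(mask, VisitCounter()); // functor returned by value\n\n    Mat dist = fmm.distanceMap(); // per-pixel arrival distances built by the run\n    std::printf(\"visited %d unknown pixels, distance map %dx%d\\n\",\n                counter.visited, dist.cols, dist.rows);\n    return 0;\n}\n"
  },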
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/fast_marching_inl.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__\n#define __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__\n\n#include \"opencv2/videostab/fast_marching.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\ntemplate <typename Inpaint>\nInpaint FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint)\n{\n    using namespace cv;\n\n    CV_Assert(mask.type() == CV_8U);\n\n    static const int lut[4][2] = {{-1,0}, {0,-1}, {1,0}, {0,1}};\n\n    mask.copyTo(flag_);\n    flag_.create(mask.size());\n    dist_.create(mask.size());\n    index_.create(mask.size());\n    narrowBand_.clear();\n    size_ = 0;\n\n    // init\n    for (int y = 0; y < flag_.rows; ++y)\n    {\n        for (int x = 0; x < flag_.cols; ++x)\n        {\n            if (flag_(y,x) == KNOWN)\n                dist_(y,x) = 0.f;\n            else\n            {\n                int n = 0;\n                int nunknown = 0;\n\n                for (int i = 0; i < 4; ++i)\n                {\n                    int xn = x + lut[i][0];\n                    int yn = y + lut[i][1];\n\n                    if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows)\n                    {\n                        n++;\n                        if (flag_(yn,xn) != KNOWN)\n                            nunknown++;\n                    }\n                }\n\n         
       if (n>0 && nunknown == n)\n                {\n                    dist_(y,x) = inf_;\n                    flag_(y,x) = INSIDE;\n                }\n                else\n                {\n                    dist_(y,x) = 0.f;\n                    flag_(y,x) = BAND;\n                    inpaint(x, y);\n\n                    narrowBand_.push_back(DXY(0.f,x,y));\n                    index_(y,x) = size_++;\n                }\n            }\n        }\n    }\n\n    // make heap\n    for (int i = size_/2-1; i >= 0; --i)\n        heapDown(i);\n\n    // main cycle\n    while (size_ > 0)\n    {\n        int x = narrowBand_[0].x;\n        int y = narrowBand_[0].y;\n        heapRemoveMin();\n\n        flag_(y,x) = KNOWN;\n        for (int n = 0; n < 4; ++n)\n        {\n            int xn = x + lut[n][0];\n            int yn = y + lut[n][1];\n\n            if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN)\n            {\n                dist_(yn,xn) = std::min(std::min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)),\n                                        std::min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1)));\n\n                if (flag_(yn,xn) == INSIDE)\n                {\n                    flag_(yn,xn) = BAND;\n                    inpaint(xn, yn);\n                    heapAdd(DXY(dist_(yn,xn),xn,yn));\n                }\n                else\n                {\n                    int i = index_(yn,xn);\n                    if (dist_(yn,xn) < narrowBand_[i].dist)\n                    {\n                        narrowBand_[i].dist = dist_(yn,xn);\n                        heapUp(i);\n                    }\n                    // works better if it's commented out\n                    /*else if (dist(yn,xn) > narrowBand[i].dist)\n                    {\n                        narrowBand[i].dist = dist(yn,xn);\n                        heapDown(i);\n                    }*/\n                }\n            }\n        }\n    }\n\n    return inpaint;\n}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
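  {
    "path": "docs/examples/videostab/fast_marching_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: shows how the\n// templated FastMarchingMethod::run() above drives a user-supplied functor.\n// The functor and the toy mask are hypothetical; run() invokes op(x, y) for\n// every pixel of the unknown region in order of increasing distance from the\n// known border, assuming the 255 = known / 0 = unknown mask convention used\n// by fast_marching.hpp.\n#include <cstdio>\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/fast_marching.hpp\"\n\nusing namespace cv;\nusing namespace cv::videostab;\n\nstruct CollectOrder\n{\n    std::vector<Point> *visited; // filled in the order the front reaches pixels\n    void operator()(int x, int y) { visited->push_back(Point(x, y)); }\n};\n\nint main()\n{\n    Mat mask(32, 32, CV_8U, Scalar(255));      // 255 = already known\n    mask(Rect(10, 10, 8, 8)).setTo(Scalar(0)); // 0 = region to march over\n\n    std::vector<Point> order;\n    CollectOrder op;\n    op.visited = &order;\n\n    FastMarchingMethod fmm;\n    fmm.run(mask, op);\n\n    std::printf(\"front visited %d pixels\\n\", (int)order.size());\n    return 0;\n}\n"
  },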
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/frame_source.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__\n#define __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__\n\n#include <vector>\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! @{\n\nclass CV_EXPORTS IFrameSource\n{\npublic:\n    virtual ~IFrameSource() {}\n    virtual void reset() = 0;\n    virtual Mat nextFrame() = 0;\n};\n\nclass CV_EXPORTS NullFrameSource : public IFrameSource\n{\npublic:\n    virtual void reset() {}\n    virtual Mat nextFrame() { return Mat(); }\n};\n\nclass CV_EXPORTS VideoFileSource : public IFrameSource\n{\npublic:\n    VideoFileSource(const String &path, bool volatileFrame = false);\n\n    virtual void reset();\n    virtual Mat nextFrame();\n\n    int width();\n    int height();\n    int count();\n    double fps();\n\nprivate:\n    Ptr<IFrameSource> impl;\n};\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
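  {
    "path": "docs/examples/videostab/frame_source_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: the\n// IFrameSource contract above is pull-based, and nextFrame() returns an\n// empty Mat once the source is exhausted.  The file name is hypothetical.\n#include <cstdio>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/frame_source.hpp\"\n\nint main()\n{\n    cv::videostab::VideoFileSource source(\"shaky_input.avi\");\n    std::printf(\"%dx%d, %d frames, %.1f fps\\n\",\n                source.width(), source.height(), source.count(), source.fps());\n\n    int n = 0;\n    for (cv::Mat frame = source.nextFrame(); !frame.empty(); frame = source.nextFrame())\n        ++n; // process the frame here\n\n    std::printf(\"decoded %d frames\\n\", n);\n    return 0;\n}\n"
  },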
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/global_motion.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__\n#define __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__\n\n#include <vector>\n#include <fstream>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/opencv_modules.hpp\"\n#include \"opencv2/videostab/optical_flow.hpp\"\n#include \"opencv2/videostab/motion_core.hpp\"\n#include \"opencv2/videostab/outlier_rejection.hpp\"\n\n#ifdef HAVE_OPENCV_CUDAIMGPROC\n#  include \"opencv2/cudaimgproc.hpp\"\n#endif\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab_motion\n//! 
@{\n\n/** @brief Estimates best global motion between two 2D point clouds in the least-squares sense.\n\n@note Works in-place and changes input point arrays.\n\n@param points0 Source set of 2D points (32F).\n@param points1 Destination set of 2D points (32F).\n@param model Motion model (up to MM_AFFINE).\n@param rmse Final root-mean-square error.\n@return 3x3 2D transformation matrix (32F).\n */\nCV_EXPORTS Mat estimateGlobalMotionLeastSquares(\n        InputOutputArray points0, InputOutputArray points1, int model = MM_AFFINE,\n        float *rmse = 0);\n\n/** @brief Estimates best global motion between two 2D point clouds robustly (using RANSAC method).\n\n@param points0 Source set of 2D points (32F).\n@param points1 Destination set of 2D points (32F).\n@param model Motion model. See cv::videostab::MotionModel.\n@param params RANSAC method parameters. See videostab::RansacParams.\n@param rmse Final root-mean-square error.\n@param ninliers Final number of inliers.\n */\nCV_EXPORTS Mat estimateGlobalMotionRansac(\n        InputArray points0, InputArray points1, int model = MM_AFFINE,\n        const RansacParams &params = RansacParams::default2dMotion(MM_AFFINE),\n        float *rmse = 0, int *ninliers = 0);\n\n/** @brief Base class for all global motion estimation methods.\n */\nclass CV_EXPORTS MotionEstimatorBase\n{\npublic:\n    virtual ~MotionEstimatorBase() {}\n\n    /** @brief Sets motion model.\n\n    @param val Motion model. See cv::videostab::MotionModel.\n     */\n    virtual void setMotionModel(MotionModel val) { motionModel_ = val; }\n\n    /**\n    @return Motion model. See cv::videostab::MotionModel.\n    */\n    virtual MotionModel motionModel() const { return motionModel_; }\n\n    /** @brief Estimates global motion between two 2D point clouds.\n\n    @param points0 Source set of 2D points (32F).\n    @param points1 Destination set of 2D points (32F).\n    @param ok Indicates whether motion was estimated successfully.\n    @return 3x3 2D transformation matrix (32F).\n     */\n    virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) = 0;\n\nprotected:\n    MotionEstimatorBase(MotionModel model) { setMotionModel(model); }\n\nprivate:\n    MotionModel motionModel_;\n};\n\n/** @brief Describes a robust RANSAC-based global 2D motion estimation method which minimizes L2 error.\n */\nclass CV_EXPORTS MotionEstimatorRansacL2 : public MotionEstimatorBase\n{\npublic:\n    MotionEstimatorRansacL2(MotionModel model = MM_AFFINE);\n\n    void setRansacParams(const RansacParams &val) { ransacParams_ = val; }\n    RansacParams ransacParams() const { return ransacParams_; }\n\n    void setMinInlierRatio(float val) { minInlierRatio_ = val; }\n    float minInlierRatio() const { return minInlierRatio_; }\n\n    virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0);\n\nprivate:\n    RansacParams ransacParams_;\n    float minInlierRatio_;\n};\n\n/** @brief Describes a global 2D motion estimation method which minimizes L1 error.\n\n@note To be able to use this method you must build OpenCV with CLP library support. 
:\n */\nclass CV_EXPORTS MotionEstimatorL1 : public MotionEstimatorBase\n{\npublic:\n    MotionEstimatorL1(MotionModel model = MM_AFFINE);\n\n    virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0);\n\nprivate:\n    std::vector<double> obj_, collb_, colub_;\n    std::vector<double> elems_, rowlb_, rowub_;\n    std::vector<int> rows_, cols_;\n\n    void set(int row, int col, double coef)\n    {\n        rows_.push_back(row);\n        cols_.push_back(col);\n        elems_.push_back(coef);\n    }\n};\n\n/** @brief Base class for global 2D motion estimation methods which take frames as input.\n */\nclass CV_EXPORTS ImageMotionEstimatorBase\n{\npublic:\n    virtual ~ImageMotionEstimatorBase() {}\n\n    virtual void setMotionModel(MotionModel val) { motionModel_ = val; }\n    virtual MotionModel motionModel() const { return motionModel_; }\n\n    virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0;\n\nprotected:\n    ImageMotionEstimatorBase(MotionModel model) { setMotionModel(model); }\n\nprivate:\n    MotionModel motionModel_;\n};\n\nclass CV_EXPORTS FromFileMotionReader : public ImageMotionEstimatorBase\n{\npublic:\n    FromFileMotionReader(const String &path);\n\n    virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);\n\nprivate:\n    std::ifstream file_;\n};\n\nclass CV_EXPORTS ToFileMotionWriter : public ImageMotionEstimatorBase\n{\npublic:\n    ToFileMotionWriter(const String &path, Ptr<ImageMotionEstimatorBase> estimator);\n\n    virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }\n    virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }\n\n    virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);\n\nprivate:\n    std::ofstream file_;\n    Ptr<ImageMotionEstimatorBase> motionEstimator_;\n};\n\n/** @brief Describes a global 2D motion estimation method which uses keypoints detection and optical flow for\nmatching.\n */\nclass CV_EXPORTS KeypointBasedMotionEstimator : public ImageMotionEstimatorBase\n{\npublic:\n    KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator);\n\n    virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }\n    virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }\n\n    void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }\n    Ptr<FeatureDetector> detector() const { return detector_; }\n\n    void setOpticalFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }\n    Ptr<ISparseOptFlowEstimator> opticalFlowEstimator() const { return optFlowEstimator_; }\n\n    void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }\n    Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }\n\n    virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);\n\nprivate:\n    Ptr<MotionEstimatorBase> motionEstimator_;\n    Ptr<FeatureDetector> detector_;\n    Ptr<ISparseOptFlowEstimator> optFlowEstimator_;\n    Ptr<IOutlierRejector> outlierRejector_;\n\n    std::vector<uchar> status_;\n    std::vector<KeyPoint> keypointsPrev_;\n    std::vector<Point2f> pointsPrev_, points_;\n    std::vector<Point2f> pointsPrevGood_, pointsGood_;\n};\n\n#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)\n\nclass CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase\n{\npublic:\n    KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> 
estimator);\n\n    virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }\n    virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }\n\n    void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }\n    Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }\n\n    virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);\n    Mat estimate(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, bool *ok = 0);\n\nprivate:\n    Ptr<MotionEstimatorBase> motionEstimator_;\n    Ptr<cuda::CornersDetector> detector_;\n    SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_;\n    Ptr<IOutlierRejector> outlierRejector_;\n\n    cuda::GpuMat frame0_, grayFrame0_, frame1_;\n    cuda::GpuMat pointsPrev_, points_;\n    cuda::GpuMat status_;\n\n    Mat hostPointsPrev_, hostPoints_;\n    std::vector<Point2f> hostPointsPrevTmp_, hostPointsTmp_;\n    std::vector<uchar> rejectionStatus_;\n};\n\n#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)\n\n/** @brief Computes motion between two frames assuming that all the intermediate motions are known.\n\n@param from Source frame index.\n@param to Destination frame index.\n@param motions Pair-wise motions. motions[i] denotes motion from the frame i to the frame i+1\n@return Motion from the frame from to the frame to.\n */\nCV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
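  {
    "path": "docs/examples/videostab/global_motion_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: wires the\n// classes declared above (a RANSAC L2 estimator inside the keypoint/optical\n// flow front end) and chains the pairwise motions with getMotion().  The\n// input file is hypothetical.\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/global_motion.hpp\"\n#include \"opencv2/videostab/frame_source.hpp\"\n\nusing namespace cv;\nusing namespace cv::videostab;\n\nint main()\n{\n    Ptr<MotionEstimatorRansacL2> core = makePtr<MotionEstimatorRansacL2>(MM_SIMILARITY);\n    core->setRansacParams(RansacParams::default2dMotion(MM_SIMILARITY));\n    KeypointBasedMotionEstimator estimator(core);\n\n    VideoFileSource source(\"shaky_input.avi\");\n    std::vector<Mat> motions; // motions[i]: motion from frame i to frame i+1\n\n    Mat prev = source.nextFrame();\n    for (Mat cur = source.nextFrame(); !cur.empty(); cur = source.nextFrame())\n    {\n        bool ok = false;\n        motions.push_back(estimator.estimate(prev, cur, &ok));\n        if (!ok)\n            motions.back() = Mat::eye(3, 3, CV_32F); // fall back to identity\n        prev = cur;\n    }\n\n    // Net motion from the first frame to the last one, as documented above.\n    Mat total = getMotion(0, (int)motions.size(), motions);\n    return 0;\n}\n"
  },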
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/inpainting.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_INPAINTINT_HPP__\n#define __OPENCV_VIDEOSTAB_INPAINTINT_HPP__\n\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/optical_flow.hpp\"\n#include \"opencv2/videostab/fast_marching.hpp\"\n#include \"opencv2/videostab/global_motion.hpp\"\n#include \"opencv2/photo.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! 
@{\n\nclass CV_EXPORTS InpainterBase\n{\npublic:\n    InpainterBase()\n        : radius_(0), motionModel_(MM_UNKNOWN), frames_(0), motions_(0),\n          stabilizedFrames_(0), stabilizationMotions_(0) {}\n\n    virtual ~InpainterBase() {}\n\n    virtual void setRadius(int val) { radius_ = val; }\n    virtual int radius() const { return radius_; }\n\n    virtual void setMotionModel(MotionModel val) { motionModel_ = val; }\n    virtual MotionModel motionModel() const { return motionModel_; }\n\n    virtual void inpaint(int idx, Mat &frame, Mat &mask) = 0;\n\n\n    // data from stabilizer\n\n    virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }\n    virtual const std::vector<Mat>& frames() const { return *frames_; }\n\n    virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }\n    virtual const std::vector<Mat>& motions() const { return *motions_; }\n\n    virtual void setStabilizedFrames(const std::vector<Mat> &val) { stabilizedFrames_ = &val; }\n    virtual const std::vector<Mat>& stabilizedFrames() const { return *stabilizedFrames_; }\n\n    virtual void setStabilizationMotions(const std::vector<Mat> &val) { stabilizationMotions_ = &val; }\n    virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }\n\nprotected:\n    int radius_;\n    MotionModel motionModel_;\n    const std::vector<Mat> *frames_;\n    const std::vector<Mat> *motions_;\n    const std::vector<Mat> *stabilizedFrames_;\n    const std::vector<Mat> *stabilizationMotions_;\n};\n\nclass CV_EXPORTS NullInpainter : public InpainterBase\n{\npublic:\n    virtual void inpaint(int /*idx*/, Mat &/*frame*/, Mat &/*mask*/) {}\n};\n\nclass CV_EXPORTS InpaintingPipeline : public InpainterBase\n{\npublic:\n    void pushBack(Ptr<InpainterBase> inpainter) { inpainters_.push_back(inpainter); }\n    bool empty() const { return inpainters_.empty(); }\n\n    virtual void setRadius(int val);\n    virtual void setMotionModel(MotionModel val);\n    virtual void setFrames(const std::vector<Mat> &val);\n    virtual void setMotions(const std::vector<Mat> &val);\n    virtual void setStabilizedFrames(const std::vector<Mat> &val);\n    virtual void setStabilizationMotions(const std::vector<Mat> &val);\n\n    virtual void inpaint(int idx, Mat &frame, Mat &mask);\n\nprivate:\n    std::vector<Ptr<InpainterBase> > inpainters_;\n};\n\nclass CV_EXPORTS ConsistentMosaicInpainter : public InpainterBase\n{\npublic:\n    ConsistentMosaicInpainter();\n\n    void setStdevThresh(float val) { stdevThresh_ = val; }\n    float stdevThresh() const { return stdevThresh_; }\n\n    virtual void inpaint(int idx, Mat &frame, Mat &mask);\n\nprivate:\n    float stdevThresh_;\n};\n\nclass CV_EXPORTS MotionInpainter : public InpainterBase\n{\npublic:\n    MotionInpainter();\n\n    void setOptFlowEstimator(Ptr<IDenseOptFlowEstimator> val) { optFlowEstimator_ = val; }\n    Ptr<IDenseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }\n\n    void setFlowErrorThreshold(float val) { flowErrorThreshold_ = val; }\n    float flowErrorThreshold() const { return flowErrorThreshold_; }\n\n    void setDistThreshold(float val) { distThresh_ = val; }\n    float distThresh() const { return distThresh_; }\n\n    void setBorderMode(int val) { borderMode_ = val; }\n    int borderMode() const { return borderMode_; }\n\n    virtual void inpaint(int idx, Mat &frame, Mat &mask);\n\nprivate:\n    FastMarchingMethod fmm_;\n    Ptr<IDenseOptFlowEstimator> optFlowEstimator_;\n    float 
flowErrorThreshold_;\n    float distThresh_;\n    int borderMode_;\n\n    Mat frame1_, transformedFrame1_;\n    Mat_<uchar> grayFrame_, transformedGrayFrame1_;\n    Mat_<uchar> mask1_, transformedMask1_;\n    Mat_<float> flowX_, flowY_, flowErrors_;\n    Mat_<uchar> flowMask_;\n};\n\nclass CV_EXPORTS ColorAverageInpainter : public InpainterBase\n{\npublic:\n    virtual void inpaint(int idx, Mat &frame, Mat &mask);\n\nprivate:\n    FastMarchingMethod fmm_;\n};\n\nclass CV_EXPORTS ColorInpainter : public InpainterBase\n{\npublic:\n    ColorInpainter(int method = INPAINT_TELEA, double radius = 2.);\n\n    virtual void inpaint(int idx, Mat &frame, Mat &mask);\n\nprivate:\n    int method_;\n    double radius_;\n    Mat invMask_;\n};\n\ninline ColorInpainter::ColorInpainter(int _method, double _radius)\n        : method_(_method), radius_(_radius) {}\n\nCV_EXPORTS void calcFlowMask(\n        const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError,\n        const Mat &mask0, const Mat &mask1, Mat &flowMask);\n\nCV_EXPORTS void completeFrameAccordingToFlow(\n        const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1,\n        float distThresh, Mat& frame0, Mat &mask0);\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
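  {
    "path": "docs/examples/videostab/inpainting_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: composes the\n// inpainters declared above into an InpaintingPipeline, in the order a\n// stabilizer would apply them.  The thresholds are hypothetical.\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/inpainting.hpp\"\n\nusing namespace cv;\nusing namespace cv::videostab;\n\nPtr<InpainterBase> makeInpainter()\n{\n    Ptr<InpaintingPipeline> pipeline = makePtr<InpaintingPipeline>();\n\n    // First try to fill missing pixels from neighbouring stabilized frames...\n    Ptr<ConsistentMosaicInpainter> mosaic = makePtr<ConsistentMosaicInpainter>();\n    mosaic->setStdevThresh(20.f);\n    pipeline->pushBack(mosaic);\n\n    // ...then paint over whatever is still masked with a plain color fill.\n    pipeline->pushBack(makePtr<ColorAverageInpainter>());\n\n    pipeline->setRadius(2); // neighbour frames consulted on each side\n    return pipeline;\n}\n"
  },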
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/log.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_LOG_HPP__\n#define __OPENCV_VIDEOSTAB_LOG_HPP__\n\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! @{\n\nclass CV_EXPORTS ILog\n{\npublic:\n    virtual ~ILog() {}\n    virtual void print(const char *format, ...) = 0;\n};\n\nclass CV_EXPORTS NullLog : public ILog\n{\npublic:\n    virtual void print(const char * /*format*/, ...) {}\n};\n\nclass CV_EXPORTS LogToStdout : public ILog\n{\npublic:\n    virtual void print(const char *format, ...);\n};\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
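  {
    "path": "docs/examples/videostab/log_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: a custom ILog\n// implementation showing the printf-style varargs contract of print().\n#include <cstdarg>\n#include <cstdio>\n#include \"opencv2/videostab/log.hpp\"\n\nclass PrefixedLog : public cv::videostab::ILog\n{\npublic:\n    virtual void print(const char *format, ...)\n    {\n        std::printf(\"[videostab] \");\n        va_list args;\n        va_start(args, format);\n        std::vprintf(format, args);\n        va_end(args);\n    }\n};\n"
  },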
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/motion_core.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_MOTION_CORE_HPP__\n#define __OPENCV_VIDEOSTAB_MOTION_CORE_HPP__\n\n#include <cmath>\n#include \"opencv2/core.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab_motion\n//! 
@{\n\n/** @brief Describes motion model between two point clouds.\n */\nenum MotionModel\n{\n    MM_TRANSLATION = 0,\n    MM_TRANSLATION_AND_SCALE = 1,\n    MM_ROTATION = 2,\n    MM_RIGID = 3,\n    MM_SIMILARITY = 4,\n    MM_AFFINE = 5,\n    MM_HOMOGRAPHY = 6,\n    MM_UNKNOWN = 7\n};\n\n/** @brief Describes RANSAC method parameters.\n */\nstruct CV_EXPORTS RansacParams\n{\n    int size; //!< subset size\n    float thresh; //!< max error to classify as inlier\n    float eps; //!< max outliers ratio\n    float prob; //!< probability of success\n\n    RansacParams() : size(0), thresh(0), eps(0), prob(0) {}\n    /** @brief Constructor\n    @param size Subset size.\n    @param thresh Maximum re-projection error value to classify as inlier.\n    @param eps Maximum ratio of incorrect correspondences.\n    @param prob Required success probability.\n     */\n    RansacParams(int size, float thresh, float eps, float prob);\n\n    /**\n    @return Number of iterations that'll be performed by RANSAC method.\n    */\n    int niters() const\n    {\n        return static_cast<int>(\n                std::ceil(std::log(1 - prob) / std::log(1 - std::pow(1 - eps, size))));\n    }\n\n    /**\n    @param model Motion model. See cv::videostab::MotionModel.\n    @return Default RANSAC method parameters for the given motion model.\n    */\n    static RansacParams default2dMotion(MotionModel model)\n    {\n        CV_Assert(model < MM_UNKNOWN);\n        if (model == MM_TRANSLATION)\n            return RansacParams(1, 0.5f, 0.5f, 0.99f);\n        if (model == MM_TRANSLATION_AND_SCALE)\n            return RansacParams(2, 0.5f, 0.5f, 0.99f);\n        if (model == MM_ROTATION)\n            return RansacParams(1, 0.5f, 0.5f, 0.99f);\n        if (model == MM_RIGID)\n            return RansacParams(2, 0.5f, 0.5f, 0.99f);\n        if (model == MM_SIMILARITY)\n            return RansacParams(2, 0.5f, 0.5f, 0.99f);\n        if (model == MM_AFFINE)\n            return RansacParams(3, 0.5f, 0.5f, 0.99f);\n        return RansacParams(4, 0.5f, 0.5f, 0.99f);\n    }\n};\n\ninline RansacParams::RansacParams(int _size, float _thresh, float _eps, float _prob)\n    : size(_size), thresh(_thresh), eps(_eps), prob(_prob) {}\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
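  {
    "path": "docs/examples/videostab/ransac_niters_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: a worked use\n// of RansacParams::niters() from above.  For default2dMotion(MM_AFFINE)\n// (size=3, eps=0.5, prob=0.99) the formula ceil(log(1-prob) / log(1-(1-eps)^size))\n// evaluates to ceil(log(0.01) / log(0.875)) = ceil(34.49) = 35 iterations.\n#include <cstdio>\n#include \"opencv2/videostab/motion_core.hpp\"\n\nint main()\n{\n    using namespace cv::videostab;\n    RansacParams p = RansacParams::default2dMotion(MM_AFFINE);\n    std::printf(\"subset=%d thresh=%.2f eps=%.2f prob=%.2f -> %d iterations\\n\",\n                p.size, p.thresh, p.eps, p.prob, p.niters()); // prints 35\n    return 0;\n}\n"
  },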
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/motion_stabilizing.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP__\n#define __OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP__\n\n#include <vector>\n#include <utility>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/global_motion.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab_motion\n//! @{\n\nclass CV_EXPORTS IMotionStabilizer\n{\npublic:\n    virtual ~IMotionStabilizer() {}\n\n    //! 
assumes that [0, size-1) is in or equals to [range.first, range.second)\n    virtual void stabilize(\n            int size, const std::vector<Mat> &motions, std::pair<int,int> range,\n            Mat *stabilizationMotions) = 0;\n};\n\nclass CV_EXPORTS MotionStabilizationPipeline : public IMotionStabilizer\n{\npublic:\n    void pushBack(Ptr<IMotionStabilizer> stabilizer) { stabilizers_.push_back(stabilizer); }\n    bool empty() const { return stabilizers_.empty(); }\n\n    virtual void stabilize(\n            int size, const std::vector<Mat> &motions, std::pair<int,int> range,\n            Mat *stabilizationMotions);\n\nprivate:\n    std::vector<Ptr<IMotionStabilizer> > stabilizers_;\n};\n\nclass CV_EXPORTS MotionFilterBase : public IMotionStabilizer\n{\npublic:\n    virtual ~MotionFilterBase() {}\n\n    virtual Mat stabilize(\n            int idx, const std::vector<Mat> &motions, std::pair<int,int> range) = 0;\n\n    virtual void stabilize(\n            int size, const std::vector<Mat> &motions, std::pair<int,int> range,\n            Mat *stabilizationMotions);\n};\n\nclass CV_EXPORTS GaussianMotionFilter : public MotionFilterBase\n{\npublic:\n    GaussianMotionFilter(int radius = 15, float stdev = -1.f);\n\n    void setParams(int radius, float stdev = -1.f);\n    int radius() const { return radius_; }\n    float stdev() const { return stdev_; }\n\n    virtual Mat stabilize(\n            int idx, const std::vector<Mat> &motions, std::pair<int,int> range);\n\nprivate:\n    int radius_;\n    float stdev_;\n    std::vector<float> weight_;\n};\n\ninline GaussianMotionFilter::GaussianMotionFilter(int _radius, float _stdev) { setParams(_radius, _stdev); }\n\nclass CV_EXPORTS LpMotionStabilizer : public IMotionStabilizer\n{\npublic:\n    LpMotionStabilizer(MotionModel model = MM_SIMILARITY);\n\n    void setMotionModel(MotionModel val) { model_ = val; }\n    MotionModel motionModel() const { return model_; }\n\n    void setFrameSize(Size val) { frameSize_ = val; }\n    Size frameSize() const { return frameSize_; }\n\n    void setTrimRatio(float val) { trimRatio_ = val; }\n    float trimRatio() const { return trimRatio_; }\n\n    void setWeight1(float val) { w1_ = val; }\n    float weight1() const { return w1_; }\n\n    void setWeight2(float val) { w2_ = val; }\n    float weight2() const { return w2_; }\n\n    void setWeight3(float val) { w3_ = val; }\n    float weight3() const { return w3_; }\n\n    void setWeight4(float val) { w4_ = val; }\n    float weight4() const { return w4_; }\n\n    virtual void stabilize(\n            int size, const std::vector<Mat> &motions, std::pair<int,int> range,\n            Mat *stabilizationMotions);\n\nprivate:\n    MotionModel model_;\n    Size frameSize_;\n    float trimRatio_;\n    float w1_, w2_, w3_, w4_;\n\n    std::vector<double> obj_, collb_, colub_;\n    std::vector<int> rows_, cols_;\n    std::vector<double> elems_, rowlb_, rowub_;\n\n    void set(int row, int col, double coef)\n    {\n        rows_.push_back(row);\n        cols_.push_back(col);\n        elems_.push_back(coef);\n    }\n};\n\nCV_EXPORTS Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio);\n\nCV_EXPORTS float estimateOptimalTrimRatio(const Mat &M, Size size);\n\n//! @}\n\n} // namespace videostab\n} // namespace\n\n#endif\n"
  },
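  {
    "path": "docs/examples/videostab/motion_filter_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: smooths a toy\n// motion chain with the GaussianMotionFilter declared above, using the batch\n// stabilize() entry point the way a two-pass stabilizer would.  The jitter\n// values are hypothetical; per the header, a negative stdev lets the filter\n// derive the deviation from its radius.\n#include <utility>\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/motion_stabilizing.hpp\"\n\nusing namespace cv;\nusing namespace cv::videostab;\n\nint main()\n{\n    const int nframes = 21;\n\n    // Pairwise motions: identity except a 5 px x-jitter between frames 10 and 11.\n    std::vector<Mat> motions;\n    for (int i = 0; i + 1 < nframes; ++i)\n        motions.push_back(Mat::eye(3, 3, CV_32F));\n    motions[10].at<float>(0, 2) = 5.f;\n\n    GaussianMotionFilter filter(15 /*radius*/);\n    MotionFilterBase &smoother = filter; // the batch overload lives in the base\n    std::vector<Mat> stabilizationMotions(nframes);\n    smoother.stabilize(nframes, motions, std::make_pair(0, nframes), &stabilizationMotions[0]);\n    return 0;\n}\n"
  },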
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/optical_flow.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__\n#define __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__\n\n#include \"opencv2/core.hpp\"\n#include \"opencv2/opencv_modules.hpp\"\n\n#ifdef HAVE_OPENCV_CUDAOPTFLOW\n  #include \"opencv2/cudaoptflow.hpp\"\n#endif\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! 
@{\n\nclass CV_EXPORTS ISparseOptFlowEstimator\n{\npublic:\n    virtual ~ISparseOptFlowEstimator() {}\n    virtual void run(\n            InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,\n            OutputArray status, OutputArray errors) = 0;\n};\n\nclass CV_EXPORTS IDenseOptFlowEstimator\n{\npublic:\n    virtual ~IDenseOptFlowEstimator() {}\n    virtual void run(\n            InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,\n            OutputArray errors) = 0;\n};\n\nclass CV_EXPORTS PyrLkOptFlowEstimatorBase\n{\npublic:\n    PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); }\n\n    virtual void setWinSize(Size val) { winSize_ = val; }\n    virtual Size winSize() const { return winSize_; }\n\n    virtual void setMaxLevel(int val) { maxLevel_ = val; }\n    virtual int maxLevel() const { return maxLevel_; }\n    virtual ~PyrLkOptFlowEstimatorBase() {}\n\nprotected:\n    Size winSize_;\n    int maxLevel_;\n};\n\nclass CV_EXPORTS SparsePyrLkOptFlowEstimator\n        : public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator\n{\npublic:\n    virtual void run(\n            InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,\n            OutputArray status, OutputArray errors);\n};\n\n#ifdef HAVE_OPENCV_CUDAOPTFLOW\n\nclass CV_EXPORTS SparsePyrLkOptFlowEstimatorGpu\n        : public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator\n{\npublic:\n    SparsePyrLkOptFlowEstimatorGpu();\n\n    virtual void run(\n            InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,\n            OutputArray status, OutputArray errors);\n\n    void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1,\n             cuda::GpuMat &status, cuda::GpuMat &errors);\n\n    void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1,\n             cuda::GpuMat &status);\n\nprivate:\n    Ptr<cuda::SparsePyrLKOpticalFlow> optFlowEstimator_;\n    cuda::GpuMat frame0_, frame1_, points0_, points1_, status_, errors_;\n};\n\nclass CV_EXPORTS DensePyrLkOptFlowEstimatorGpu\n        : public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator\n{\npublic:\n    DensePyrLkOptFlowEstimatorGpu();\n\n    virtual void run(\n            InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,\n            OutputArray errors);\n\nprivate:\n    Ptr<cuda::DensePyrLKOpticalFlow> optFlowEstimator_;\n    cuda::GpuMat frame0_, frame1_, flowX_, flowY_, errors_;\n};\n\n#endif\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
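  {
    "path": "docs/examples/videostab/optical_flow_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: runs the\n// SparsePyrLkOptFlowEstimator declared above on two (hypothetical) gray\n// frames, seeded with goodFeaturesToTrack corners.\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n#include \"opencv2/videostab/optical_flow.hpp\"\n\nusing namespace cv;\nusing namespace cv::videostab;\n\nvoid trackBetween(const Mat &gray0, const Mat &gray1)\n{\n    std::vector<Point2f> points0;\n    goodFeaturesToTrack(gray0, points0, 500, 0.01, 8.0);\n\n    SparsePyrLkOptFlowEstimator flow;\n    flow.setWinSize(Size(21, 21)); // these two match the base-class defaults\n    flow.setMaxLevel(3);\n\n    std::vector<Point2f> points1;\n    std::vector<uchar> status;\n    flow.run(gray0, gray1, points0, points1, status, noArray());\n    // points1[i] is usable only where status[i] != 0\n}\n"
  },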
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/outlier_rejection.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_OUTLIER_REJECTION_HPP__\n#define __OPENCV_VIDEOSTAB_OUTLIER_REJECTION_HPP__\n\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/motion_core.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! @{\n\nclass CV_EXPORTS IOutlierRejector\n{\npublic:\n    virtual ~IOutlierRejector() {}\n\n    virtual void process(\n            Size frameSize, InputArray points0, InputArray points1, OutputArray mask) = 0;\n};\n\nclass CV_EXPORTS NullOutlierRejector : public IOutlierRejector\n{\npublic:\n    virtual void process(\n            Size frameSize, InputArray points0, InputArray points1, OutputArray mask);\n};\n\nclass CV_EXPORTS TranslationBasedLocalOutlierRejector : public IOutlierRejector\n{\npublic:\n    TranslationBasedLocalOutlierRejector();\n\n    void setCellSize(Size val) { cellSize_ = val; }\n    Size cellSize() const { return cellSize_; }\n\n    void setRansacParams(RansacParams val) { ransacParams_ = val; }\n    RansacParams ransacParams() const { return ransacParams_; }\n\n    virtual void process(\n            Size frameSize, InputArray points0, InputArray points1, OutputArray mask);\n\nprivate:\n    Size cellSize_;\n    RansacParams ransacParams_;\n\n    typedef std::vector<int> Cell;\n    std::vector<Cell> grid_;\n};\n\n//! 
@}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
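  {
    "path": "docs/examples/videostab/outlier_rejection_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: applies the\n// TranslationBasedLocalOutlierRejector declared above to hypothetical\n// matches.  It buckets correspondences into grid cells and runs a per-cell\n// translation RANSAC, writing a uchar inlier mask.\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/videostab/outlier_rejection.hpp\"\n\nusing namespace cv;\nusing namespace cv::videostab;\n\nvoid rejectOutliers(Size frameSize,\n                    const std::vector<Point2f> &points0,\n                    const std::vector<Point2f> &points1,\n                    std::vector<uchar> &inlierMask)\n{\n    TranslationBasedLocalOutlierRejector rejector;\n    rejector.setCellSize(Size(50, 50));\n    rejector.setRansacParams(RansacParams::default2dMotion(MM_TRANSLATION));\n    rejector.process(frameSize, points0, points1, inlierMask);\n    // keep only correspondences with inlierMask[i] != 0 before estimation\n}\n"
  },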
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/ring_buffer.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_RING_BUFFER_HPP__\n#define __OPENCV_VIDEOSTAB_RING_BUFFER_HPP__\n\n#include <vector>\n#include \"opencv2/imgproc.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! @{\n\ntemplate <typename T> inline T& at(int idx, std::vector<T> &items)\n{\n    return items[cv::borderInterpolate(idx, static_cast<int>(items.size()), cv::BORDER_WRAP)];\n}\n\ntemplate <typename T> inline const T& at(int idx, const std::vector<T> &items)\n{\n    return items[cv::borderInterpolate(idx, static_cast<int>(items.size()), cv::BORDER_WRAP)];\n}\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
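  {
    "path": "docs/examples/videostab/ring_buffer_example.cpp",
    "content": "// Editor's illustrative sketch, not part of the original tree: demonstrates\n// the wrap-around indexing of at() above.  borderInterpolate(idx, len,\n// BORDER_WRAP) maps -1 to len-1 and len to 0, so out-of-range frame indices\n// wrap to the other end of the buffer.\n#include <cassert>\n#include <vector>\n#include \"opencv2/videostab/ring_buffer.hpp\"\n\nint main()\n{\n    std::vector<int> items;\n    for (int i = 0; i < 5; ++i)\n        items.push_back(i); // {0, 1, 2, 3, 4}\n\n    assert(cv::videostab::at(-1, items) == 4); // wraps to the back\n    assert(cv::videostab::at(5, items) == 0);  // wraps to the front\n    assert(cv::videostab::at(2, items) == 2);  // in-range index is unchanged\n    return 0;\n}\n"
  },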
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/stabilizer.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_STABILIZER_HPP__\n#define __OPENCV_VIDEOSTAB_STABILIZER_HPP__\n\n#include <vector>\n#include <ctime>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/imgproc.hpp\"\n#include \"opencv2/videostab/global_motion.hpp\"\n#include \"opencv2/videostab/motion_stabilizing.hpp\"\n#include \"opencv2/videostab/frame_source.hpp\"\n#include \"opencv2/videostab/log.hpp\"\n#include \"opencv2/videostab/inpainting.hpp\"\n#include \"opencv2/videostab/deblurring.hpp\"\n#include \"opencv2/videostab/wobble_suppression.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! 
@{\n\nclass CV_EXPORTS StabilizerBase\n{\npublic:\n    virtual ~StabilizerBase() {}\n\n    void setLog(Ptr<ILog> ilog) { log_ = ilog; }\n    Ptr<ILog> log() const { return log_; }\n\n    void setRadius(int val) { radius_ = val; }\n    int radius() const { return radius_; }\n\n    void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; }\n    Ptr<IFrameSource> frameSource() const { return frameSource_; }\n\n    void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }\n    Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }\n\n    void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; }\n    Ptr<DeblurerBase> deblurrer() const { return deblurer_; }\n\n    void setTrimRatio(float val) { trimRatio_ = val; }\n    float trimRatio() const { return trimRatio_; }\n\n    void setCorrectionForInclusion(bool val) { doCorrectionForInclusion_ = val; }\n    bool doCorrectionForInclusion() const { return doCorrectionForInclusion_; }\n\n    void setBorderMode(int val) { borderMode_ = val; }\n    int borderMode() const { return borderMode_; }\n\n    void setInpainter(Ptr<InpainterBase> val) { inpainter_ = val; }\n    Ptr<InpainterBase> inpainter() const { return inpainter_; }\n\nprotected:\n    StabilizerBase();\n\n    void reset();\n    Mat nextStabilizedFrame();\n    bool doOneIteration();\n    virtual void setUp(const Mat &firstFrame);\n    virtual Mat estimateMotion() = 0;\n    virtual Mat estimateStabilizationMotion() = 0;\n    void stabilizeFrame();\n    virtual Mat postProcessFrame(const Mat &frame);\n    void logProcessingTime();\n\n    Ptr<ILog> log_;\n    Ptr<IFrameSource> frameSource_;\n    Ptr<ImageMotionEstimatorBase> motionEstimator_;\n    Ptr<DeblurerBase> deblurer_;\n    Ptr<InpainterBase> inpainter_;\n    int radius_;\n    float trimRatio_;\n    bool doCorrectionForInclusion_;\n    int borderMode_;\n\n    Size frameSize_;\n    Mat frameMask_;\n    int curPos_;\n    int curStabilizedPos_;\n    bool doDeblurring_;\n    Mat preProcessedFrame_;\n    bool doInpainting_;\n    Mat inpaintingMask_;\n    Mat finalFrame_;\n    std::vector<Mat> frames_;\n    std::vector<Mat> motions_; // motions_[i] is the motion from i-th to i+1-th frame\n    std::vector<float> blurrinessRates_;\n    std::vector<Mat> stabilizedFrames_;\n    std::vector<Mat> stabilizedMasks_;\n    std::vector<Mat> stabilizationMotions_;\n    clock_t processingStartTime_;\n};\n\nclass CV_EXPORTS OnePassStabilizer : public StabilizerBase, public IFrameSource\n{\npublic:\n    OnePassStabilizer();\n\n    void setMotionFilter(Ptr<MotionFilterBase> val) { motionFilter_ = val; }\n    Ptr<MotionFilterBase> motionFilter() const { return motionFilter_; }\n\n    virtual void reset();\n    virtual Mat nextFrame() { return nextStabilizedFrame(); }\n\nprotected:\n    virtual void setUp(const Mat &firstFrame);\n    virtual Mat estimateMotion();\n    virtual Mat estimateStabilizationMotion();\n    virtual Mat postProcessFrame(const Mat &frame);\n\n    Ptr<MotionFilterBase> motionFilter_;\n};\n\nclass CV_EXPORTS TwoPassStabilizer : public StabilizerBase, public IFrameSource\n{\npublic:\n    TwoPassStabilizer();\n\n    void setMotionStabilizer(Ptr<IMotionStabilizer> val) { motionStabilizer_ = val; }\n    Ptr<IMotionStabilizer> motionStabilizer() const { return motionStabilizer_; }\n\n    void setWobbleSuppressor(Ptr<WobbleSuppressorBase> val) { wobbleSuppressor_ = val; }\n    Ptr<WobbleSuppressorBase> wobbleSuppressor() const { return wobbleSuppressor_; }\n\n    void 
setEstimateTrimRatio(bool val) { mustEstTrimRatio_ = val; }\n    bool mustEstimateTrimaRatio() const { return mustEstTrimRatio_; }\n\n    virtual void reset();\n    virtual Mat nextFrame();\n\nprotected:\n    void runPrePassIfNecessary();\n\n    virtual void setUp(const Mat &firstFrame);\n    virtual Mat estimateMotion();\n    virtual Mat estimateStabilizationMotion();\n    virtual Mat postProcessFrame(const Mat &frame);\n\n    Ptr<IMotionStabilizer> motionStabilizer_;\n    Ptr<WobbleSuppressorBase> wobbleSuppressor_;\n    bool mustEstTrimRatio_;\n\n    int frameCount_;\n    bool isPrePassDone_;\n    bool doWobbleSuppression_;\n    std::vector<Mat> motions2_;\n    Mat suppressedFrame_;\n};\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab/wobble_suppression.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_WOBBLE_SUPPRESSION_HPP__\n#define __OPENCV_VIDEOSTAB_WOBBLE_SUPPRESSION_HPP__\n\n#include <vector>\n#include \"opencv2/core.hpp\"\n#include \"opencv2/core/cuda.hpp\"\n#include \"opencv2/videostab/global_motion.hpp\"\n#include \"opencv2/videostab/log.hpp\"\n\nnamespace cv\n{\nnamespace videostab\n{\n\n//! @addtogroup videostab\n//! 
@{\n\nclass CV_EXPORTS WobbleSuppressorBase\n{\npublic:\n    WobbleSuppressorBase();\n\n    virtual ~WobbleSuppressorBase() {}\n\n    void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }\n    Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }\n\n    virtual void suppress(int idx, const Mat &frame, Mat &result) = 0;\n\n\n    // data from stabilizer\n\n    virtual void setFrameCount(int val) { frameCount_ = val; }\n    virtual int frameCount() const { return frameCount_; }\n\n    virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }\n    virtual const std::vector<Mat>& motions() const { return *motions_; }\n\n    virtual void setMotions2(const std::vector<Mat> &val) { motions2_ = &val; }\n    virtual const std::vector<Mat>& motions2() const { return *motions2_; }\n\n    virtual void setStabilizationMotions(const std::vector<Mat> &val) { stabilizationMotions_ = &val; }\n    virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }\n\nprotected:\n    Ptr<ImageMotionEstimatorBase> motionEstimator_;\n    int frameCount_;\n    const std::vector<Mat> *motions_;\n    const std::vector<Mat> *motions2_;\n    const std::vector<Mat> *stabilizationMotions_;\n};\n\nclass CV_EXPORTS NullWobbleSuppressor : public WobbleSuppressorBase\n{\npublic:\n    virtual void suppress(int idx, const Mat &frame, Mat &result);\n};\n\nclass CV_EXPORTS MoreAccurateMotionWobbleSuppressorBase : public WobbleSuppressorBase\n{\npublic:\n    virtual void setPeriod(int val) { period_ = val; }\n    virtual int period() const { return period_; }\n\nprotected:\n    MoreAccurateMotionWobbleSuppressorBase() { setPeriod(30); }\n\n    int period_;\n};\n\nclass CV_EXPORTS MoreAccurateMotionWobbleSuppressor : public MoreAccurateMotionWobbleSuppressorBase\n{\npublic:\n    virtual void suppress(int idx, const Mat &frame, Mat &result);\n\nprivate:\n    Mat_<float> mapx_, mapy_;\n};\n\n#if defined(HAVE_OPENCV_CUDAWARPING)\nclass CV_EXPORTS MoreAccurateMotionWobbleSuppressorGpu : public MoreAccurateMotionWobbleSuppressorBase\n{\npublic:\n    void suppress(int idx, const cuda::GpuMat &frame, cuda::GpuMat &result);\n    virtual void suppress(int idx, const Mat &frame, Mat &result);\n\nprivate:\n    cuda::GpuMat frameDevice_, resultDevice_;\n    cuda::GpuMat mapx_, mapy_;\n};\n#endif\n\n//! @}\n\n} // namespace videostab\n} // namespace cv\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/videostab.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_VIDEOSTAB_HPP__\n#define __OPENCV_VIDEOSTAB_HPP__\n\n/**\n  @defgroup videostab Video Stabilization\n\nThe video stabilization module contains a set of functions and classes that can be used to solve the\nproblem of video stabilization. There are a few methods implemented, most of them are descibed in\nthe papers @cite OF06 and @cite G11 . However, there are some extensions and deviations from the orginal\npaper methods.\n\n### References\n\n 1. \"Full-Frame Video Stabilization with Motion Inpainting\"\n     Yasuyuki Matsushita, Eyal Ofek, Weina Ge, Xiaoou Tang, Senior Member, and Heung-Yeung Shum\n 2. \"Auto-Directed Video Stabilization with Robust L1 Optimal Camera Paths\"\n     Matthias Grundmann, Vivek Kwatra, Irfan Essa\n\n     @{\n         @defgroup videostab_motion Global Motion Estimation\n\nThe video stabilization module contains a set of functions and classes for global motion estimation\nbetween point clouds or between images. In the last case features are extracted and matched\ninternally. 
For the sake of convenience the motion estimation functions are wrapped into classes.\nBoth the functions and the classes are available.\n\n         @defgroup videostab_marching Fast Marching Method\n\nThe Fast Marching Method @cite Telea04 is used in some of the video stabilization routines to do motion\nand color inpainting. The method is implemented in a flexible way and is made public for other users.\n\n     @}\n\n*/\n\n#include \"opencv2/videostab/stabilizer.hpp\"\n#include \"opencv2/videostab/ring_buffer.hpp\"\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/xfeatures2d/cuda.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_XFEATURES2D_CUDA_HPP__\n#define __OPENCV_XFEATURES2D_CUDA_HPP__\n\n#include \"opencv2/core/cuda.hpp\"\n\nnamespace cv { namespace cuda {\n\n//! @addtogroup xfeatures2d_nonfree\n//! @{\n\n/** @brief Class used for extracting Speeded Up Robust Features (SURF) from an image. :\n\nThe class SURF_CUDA implements Speeded Up Robust Features descriptor. There is a fast multi-scale\nHessian keypoint detector that can be used to find the keypoints (which is the default option). But\nthe descriptors can also be computed for the user-specified keypoints. Only 8-bit grayscale images\nare supported.\n\nThe class SURF_CUDA can store results in the GPU and CPU memory. It provides functions to convert\nresults between CPU and GPU version ( uploadKeypoints, downloadKeypoints, downloadDescriptors ). The\nformat of CPU results is the same as SURF results. GPU results are stored in GpuMat. 
The keypoints\nmatrix is \\f$\\texttt{nFeatures} \\times 7\\f$ matrix with the CV_32FC1 type.\n\n-   keypoints.ptr\\<float\\>(X_ROW)[i] contains x coordinate of the i-th feature.\n-   keypoints.ptr\\<float\\>(Y_ROW)[i] contains y coordinate of the i-th feature.\n-   keypoints.ptr\\<float\\>(LAPLACIAN_ROW)[i] contains the laplacian sign of the i-th feature.\n-   keypoints.ptr\\<float\\>(OCTAVE_ROW)[i] contains the octave of the i-th feature.\n-   keypoints.ptr\\<float\\>(SIZE_ROW)[i] contains the size of the i-th feature.\n-   keypoints.ptr\\<float\\>(ANGLE_ROW)[i] contain orientation of the i-th feature.\n-   keypoints.ptr\\<float\\>(HESSIAN_ROW)[i] contains the response of the i-th feature.\n\nThe descriptors matrix is \\f$\\texttt{nFeatures} \\times \\texttt{descriptorSize}\\f$ matrix with the\nCV_32FC1 type.\n\nThe class SURF_CUDA uses some buffers and provides access to it. All buffers can be safely released\nbetween function calls.\n\n@sa SURF\n\n@note\n   -   An example for using the SURF keypoint matcher on GPU can be found at\n        opencv_source_code/samples/gpu/surf_keypoint_matcher.cpp\n\n */\nclass CV_EXPORTS SURF_CUDA\n{\npublic:\n    enum KeypointLayout\n    {\n        X_ROW = 0,\n        Y_ROW,\n        LAPLACIAN_ROW,\n        OCTAVE_ROW,\n        SIZE_ROW,\n        ANGLE_ROW,\n        HESSIAN_ROW,\n        ROWS_COUNT\n    };\n\n    //! the default constructor\n    SURF_CUDA();\n    //! the full constructor taking all the necessary parameters\n    explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4,\n         int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);\n\n    //! returns the descriptor size in float's (64 or 128)\n    int descriptorSize() const;\n    //! returns the default norm type\n    int defaultNorm() const;\n\n    //! upload host keypoints to device memory\n    void uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);\n    //! download keypoints from device to host memory\n    void downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints);\n\n    //! download descriptors from device to host memory\n    void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors);\n\n    //! finds the keypoints using fast hessian detector used in SURF\n    //! supports CV_8UC1 images\n    //! keypoints will have nFeature cols and 6 rows\n    //! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature\n    //! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature\n    //! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature\n    //! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature\n    //! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature\n    //! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature\n    //! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature\n    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);\n    //! finds the keypoints and computes their descriptors.\n    //! 
Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction\n    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,\n        bool useProvidedKeypoints = false);\n\n    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);\n    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,\n        bool useProvidedKeypoints = false);\n\n    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,\n        bool useProvidedKeypoints = false);\n\n    void releaseMemory();\n\n    // SURF parameters\n    double hessianThreshold;\n    int nOctaves;\n    int nOctaveLayers;\n    bool extended;\n    bool upright;\n\n    //! max keypoints = min(keypointsRatio * img.size().area(), 65535)\n    float keypointsRatio;\n\n    GpuMat sum, mask1, maskSum;\n\n    GpuMat det, trace;\n\n    GpuMat maxPosBuffer;\n};\n\n//! @}\n\n}} // namespace cv { namespace cuda {\n\n#endif // __OPENCV_XFEATURES2D_CUDA_HPP__\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/xfeatures2d/nonfree.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_XFEATURES2D_FEATURES_2D_HPP__\n#define __OPENCV_XFEATURES2D_FEATURES_2D_HPP__\n\n#include \"opencv2/features2d.hpp\"\n\nnamespace cv\n{\nnamespace xfeatures2d\n{\n\n//! @addtogroup xfeatures2d_nonfree\n//! @{\n\n/** @brief Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform\n(SIFT) algorithm by D. Lowe @cite Lowe04 .\n */\nclass CV_EXPORTS_W SIFT : public Feature2D\n{\npublic:\n    /**\n    @param nfeatures The number of best features to retain. The features are ranked by their scores\n    (measured in SIFT algorithm as the local contrast)\n\n    @param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe paper. The\n    number of octaves is computed automatically from the image resolution.\n\n    @param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform\n    (low-contrast) regions. The larger the threshold, the less features are produced by the detector.\n\n    @param edgeThreshold The threshold used to filter out edge-like features. Note that the its meaning\n    is different from the contrastThreshold, i.e. 
the larger the edgeThreshold, the fewer features are\n    filtered out (more features are retained).\n\n    @param sigma The sigma of the Gaussian applied to the input image at the octave \\#0. If your image\n    is captured with a weak camera with soft lenses, you might want to reduce the number.\n     */\n    CV_WRAP static Ptr<SIFT> create( int nfeatures = 0, int nOctaveLayers = 3,\n                                    double contrastThreshold = 0.04, double edgeThreshold = 10,\n                                    double sigma = 1.6);\n};\n\ntypedef SIFT SiftFeatureDetector;\ntypedef SIFT SiftDescriptorExtractor;\n\n/** @brief Class for extracting Speeded Up Robust Features from an image @cite Bay06 .\n\nThe algorithm parameters:\n-   member int extended\n    -   0 means that the basic descriptors (64 elements each) shall be computed\n    -   1 means that the extended descriptors (128 elements each) shall be computed\n-   member int upright\n    -   0 means that the detector computes the orientation of each feature.\n    -   1 means that the orientation is not computed (which is much, much faster). For example,\nif you match images from a stereo pair, or do image stitching, the matched features\nlikely have very similar angles, and you can speed up feature extraction by setting\nupright=1.\n-   member double hessianThreshold\nThreshold for the keypoint detector. Only features whose hessian is larger than\nhessianThreshold are retained by the detector. Therefore, the larger the value, the fewer\nkeypoints you will get. A good default value could be from 300 to 500, depending on the\nimage contrast.\n-   member int nOctaves\nThe number of gaussian pyramid octaves that the detector uses. It is set to 4 by default.\nIf you want to get very large features, use a larger value. If you want just small\nfeatures, decrease it.\n-   member int nOctaveLayers\nThe number of images within each octave of a gaussian pyramid. 
It is set to 2 by default.\n@note\n   -   An example using the SURF feature detector can be found at\n        opencv_source_code/samples/cpp/generic_descriptor_match.cpp\n    -   Another example using the SURF feature detector, extractor and matcher can be found at\n        opencv_source_code/samples/cpp/matcher_simple.cpp\n */\nclass CV_EXPORTS_W SURF : public Feature2D\n{\npublic:\n    /**\n    @param hessianThreshold Threshold for hessian keypoint detector used in SURF.\n    @param nOctaves Number of pyramid octaves the keypoint detector will use.\n    @param nOctaveLayers Number of octave layers within each octave.\n    @param extended Extended descriptor flag (true - use extended 128-element descriptors; false - use\n    64-element descriptors).\n    @param upright Up-right or rotated features flag (true - do not compute orientation of features;\n    false - compute orientation).\n     */\n    CV_WRAP static Ptr<SURF> create(double hessianThreshold=100,\n                  int nOctaves = 4, int nOctaveLayers = 3,\n                  bool extended = false, bool upright = false);\n\n    CV_WRAP virtual void setHessianThreshold(double hessianThreshold) = 0;\n    CV_WRAP virtual double getHessianThreshold() const = 0;\n\n    CV_WRAP virtual void setNOctaves(int nOctaves) = 0;\n    CV_WRAP virtual int getNOctaves() const = 0;\n\n    CV_WRAP virtual void setNOctaveLayers(int nOctaveLayers) = 0;\n    CV_WRAP virtual int getNOctaveLayers() const = 0;\n\n    CV_WRAP virtual void setExtended(bool extended) = 0;\n    CV_WRAP virtual bool getExtended() const = 0;\n\n    CV_WRAP virtual void setUpright(bool upright) = 0;\n    CV_WRAP virtual bool getUpright() const = 0;\n};\n\ntypedef SURF SurfFeatureDetector;\ntypedef SURF SurfDescriptorExtractor;\n\n//! @}\n\n}\n} /* namespace cv */\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/xfeatures2d.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_XFEATURES2D_HPP__\n#define __OPENCV_XFEATURES2D_HPP__\n\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/xfeatures2d/nonfree.hpp\"\n\n/** @defgroup xfeatures2d Extra 2D Features Framework\n@{\n    @defgroup xfeatures2d_experiment Experimental 2D Features Algorithms\n\nThis section describes experimental algorithms for 2d feature detection.\n\n    @defgroup xfeatures2d_nonfree Non-free 2D Features Algorithms\n\nThis section describes two popular algorithms for 2d feature detection, SIFT and SURF, that are\nknown to be patented. Use them at your own risk.\n\n@}\n*/\n\nnamespace cv\n{\nnamespace xfeatures2d\n{\n\n//! @addtogroup xfeatures2d_experiment\n//! @{\n\n/** @brief Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in @cite AOV12 .\n\nThe algorithm propose a novel keypoint descriptor inspired by the human visual system and more\nprecisely the retina, coined Fast Retina Key- point (FREAK). A cascade of binary strings is\ncomputed by efficiently comparing image intensities over a retinal sampling pattern. 
FREAKs are in\ngeneral faster to compute with a lower memory load and also more robust than SIFT, SURF or BRISK.\nThey are competitive alternatives to existing keypoints, in particular for embedded applications.\n\n@note\n   -   An example of how to use the FREAK descriptor can be found at\n        opencv_source_code/samples/cpp/freak_demo.cpp\n */\nclass CV_EXPORTS_W FREAK : public Feature2D\n{\npublic:\n\n    enum\n    {\n        NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45\n    };\n\n    /**\n    @param orientationNormalized Enable orientation normalization.\n    @param scaleNormalized Enable scale normalization.\n    @param patternScale Scaling of the description pattern.\n    @param nOctaves Number of octaves covered by the detected keypoints.\n    @param selectedPairs (Optional) user-defined selected pairs indexes.\n     */\n    CV_WRAP static Ptr<FREAK> create(bool orientationNormalized = true,\n                             bool scaleNormalized = true,\n                             float patternScale = 22.0f,\n                             int nOctaves = 4,\n                             const std::vector<int>& selectedPairs = std::vector<int>());\n};\n\n\n/** @brief The class implements the keypoint detector introduced by @cite Agrawal08, a synonym of StarDetector.\n */\nclass CV_EXPORTS_W StarDetector : public Feature2D\n{\npublic:\n    //! the full constructor\n    CV_WRAP static Ptr<StarDetector> create(int maxSize=45, int responseThreshold=30,\n                         int lineThresholdProjected=10,\n                         int lineThresholdBinarized=8,\n                         int suppressNonmaxSize=5);\n};\n\n/*\n * BRIEF Descriptor\n */\n\n/** @brief Class for computing BRIEF descriptors described in @cite calon2010 .\n\n@param bytes length of the descriptor in bytes, valid values are: 16, 32 (default) or 64.\n@param use_orientation sample patterns using keypoints orientation, disabled by default.\n\n */\nclass CV_EXPORTS_W BriefDescriptorExtractor : public Feature2D\n{\npublic:\n    CV_WRAP static Ptr<BriefDescriptorExtractor> create( int bytes = 32, bool use_orientation = false );\n};\n\n/** @brief Class implementing the locally uniform comparison image descriptor, described in @cite LUCID\n\nAn image descriptor that can be computed very fast, while being\nabout as robust as, for example, SURF or BRIEF.\n */\nclass CV_EXPORTS_W LUCID : public Feature2D\n{\npublic:\n    /**\n     * @param lucid_kernel kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth\n     * @param blur_kernel kernel for blurring the image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth\n     */\n    CV_WRAP static Ptr<LUCID> create(const int lucid_kernel, const int blur_kernel);\n};\n\n\n/*\n* LATCH Descriptor\n*/\n\n/** @brief Class for computing the LATCH descriptor.\nIf you find this code useful, please add a reference to the following paper in your work:\nGil Levi and Tal Hassner, \"LATCH: Learned Arrangements of Three Patch Codes\", arXiv preprint arXiv:1501.03719, 15 Jan. 2015\n\nLATCH is a binary descriptor based on learned comparisons of triplets of image patches.\n\n* bytes is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1\n* rotationInvariance - whether or not the descriptor should compensate for orientation changes.\n* half_ssd_size - half the size of the mini-patches. 
For example, if we would like to compare triplets of patches of size 7x7x\n    then the half_ssd_size should be (7-1)/2 = 3.\n\nNote: the descriptor can be coupled with any keypoint extractor. The only demand is that if you use set rotationInvariance = True then\n    you will have to use an extractor which estimates the patch orientation (in degrees). Examples for such extractors are ORB and SIFT.\n\nNote: a complete example can be found under /samples/cpp/tutorial_code/xfeatures2D/latch_match.cpp\n\n*/\nclass CV_EXPORTS_W LATCH : public Feature2D\n{\npublic:\n\tCV_WRAP static Ptr<LATCH> create(int bytes = 32, bool rotationInvariance = true, int half_ssd_size=3);\n};\n\n/** @brief Class implementing DAISY descriptor, described in @cite Tola10\n\n@param radius radius of the descriptor at the initial scale\n@param q_radius amount of radial range division quantity\n@param q_theta amount of angular range division quantity\n@param q_hist amount of gradient orientations range division quantity\n@param norm choose descriptors normalization type, where\nDAISY::NRM_NONE will not do any normalization (default),\nDAISY::NRM_PARTIAL mean that histograms are normalized independently for L2 norm equal to 1.0,\nDAISY::NRM_FULL mean that descriptors are normalized for L2 norm equal to 1.0,\nDAISY::NRM_SIFT mean that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT\n@param H optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image\n@param interpolation switch to disable interpolation for speed improvement at minor quality loss\n@param use_orientation sample patterns using keypoints orientation, disabled by default.\n\n */\nclass CV_EXPORTS_W DAISY : public Feature2D\n{\npublic:\n    enum\n    {\n        NRM_NONE = 100, NRM_PARTIAL = 101, NRM_FULL = 102, NRM_SIFT = 103,\n    };\n    CV_WRAP static Ptr<DAISY> create( float radius = 15, int q_radius = 3, int q_theta = 8,\n                int q_hist = 8, int norm = DAISY::NRM_NONE, InputArray H = noArray(),\n                bool interpolation = true, bool use_orientation = false );\n\n    /** @overload\n     * @param image image to extract descriptors\n     * @param keypoints of interest within image\n     * @param descriptors resulted descriptors array\n     */\n    virtual void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) = 0;\n\n    virtual void compute( InputArrayOfArrays images,\n                          std::vector<std::vector<KeyPoint> >& keypoints,\n                          OutputArrayOfArrays descriptors );\n\n    /** @overload\n     * @param image image to extract descriptors\n     * @param roi region of interest within image\n     * @param descriptors resulted descriptors array for roi image pixels\n     */\n    virtual void compute( InputArray image, Rect roi, OutputArray descriptors ) = 0;\n\n    /**@overload\n     * @param image image to extract descriptors\n     * @param descriptors resulted descriptors array for all image pixels\n     */\n    virtual void compute( InputArray image, OutputArray descriptors ) = 0;\n\n    /**\n     * @param y position y on image\n     * @param x position x on image\n     * @param orientation orientation on image (0->360)\n     * @param descriptor supplied array for descriptor storage\n     */\n    virtual void GetDescriptor( double y, double x, int orientation, float* descriptor ) const = 0;\n\n    /**\n     * @param y position y on image\n     * @param x 
position x on image\n     * @param orientation orientation on image (0->360)\n     * @param descriptor supplied array for descriptor storage\n     * @param H homography matrix for warped grid\n     */\n    virtual bool GetDescriptor( double y, double x, int orientation, float* descriptor, double* H ) const = 0;\n\n    /**\n     * @param y position y on image\n     * @param x position x on image\n     * @param orientation orientation on image (0->360)\n     * @param descriptor supplied array for descriptor storage\n     */\n    virtual void GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor ) const = 0;\n\n    /**\n     * @param y position y on image\n     * @param x position x on image\n     * @param orientation orientation on image (0->360)\n     * @param descriptor supplied array for descriptor storage\n     * @param H homography matrix for warped grid\n     */\n    virtual bool GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor , double *H ) const = 0;\n\n};\n\n/** @brief Class implementing the MSD (*Maximal Self-Dissimilarity*) keypoint detector, described in @cite Tombari14.\n\nThe algorithm implements a novel interest point detector stemming from the intuition that image patches\nwhich are highly dissimilar over a relatively large extent of their surroundings hold the property of\nbeing repeatable and distinctive. This concept of \"contextual self-dissimilarity\" reverses the key\nparadigm of recent successful techniques such as the Local Self-Similarity descriptor and the Non-Local\nMeans filter, which build upon the presence of similar - rather than dissimilar - patches. Moreover,\nit extends to contextual information the local self-dissimilarity notion embedded in established\ndetectors of corner-like interest points, thereby achieving enhanced repeatability, distinctiveness and\nlocalization accuracy.\n\n*/\n\nclass CV_EXPORTS_W MSDDetector : public Feature2D {\n\npublic:\n\n    static Ptr<MSDDetector> create(int m_patch_radius = 3, int m_search_area_radius = 5,\n            int m_nms_radius = 5, int m_nms_scale_radius = 0, float m_th_saliency = 250.0f, int m_kNN = 4,\n            float m_scale_factor = 1.25f, int m_n_scales = -1, bool m_compute_orientation = false);\n};\n\n//! @}\n\n}\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/disparity_filter.hpp",
    "content": "/*\n *  By downloading, copying, installing or using the software you agree to this license.\n *  If you do not agree to this license, do not download, install,\n *  copy or use the software.\n *\n *\n *  License Agreement\n *  For Open Source Computer Vision Library\n *  (3 - clause BSD License)\n *\n *  Redistribution and use in source and binary forms, with or without modification,\n *  are permitted provided that the following conditions are met :\n *\n *  *Redistributions of source code must retain the above copyright notice,\n *  this list of conditions and the following disclaimer.\n *\n *  * Redistributions in binary form must reproduce the above copyright notice,\n *  this list of conditions and the following disclaimer in the documentation\n *  and / or other materials provided with the distribution.\n *\n *  * Neither the names of the copyright holders nor the names of the contributors\n *  may be used to endorse or promote products derived from this software\n *  without specific prior written permission.\n *\n *  This software is provided by the copyright holders and contributors \"as is\" and\n *  any express or implied warranties, including, but not limited to, the implied\n *  warranties of merchantability and fitness for a particular purpose are disclaimed.\n *  In no event shall copyright holders or contributors be liable for any direct,\n *  indirect, incidental, special, exemplary, or consequential damages\n *  (including, but not limited to, procurement of substitute goods or services;\n *  loss of use, data, or profits; or business interruption) however caused\n *  and on any theory of liability, whether in contract, strict liability,\n *  or tort(including negligence or otherwise) arising in any way out of\n *  the use of this software, even if advised of the possibility of such damage.\n */\n\n#ifndef __OPENCV_DISPARITYFILTER_HPP__\n#define __OPENCV_DISPARITYFILTER_HPP__\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n#include <opencv2/calib3d.hpp>\n\nnamespace cv {\nnamespace ximgproc {\n\n//! @addtogroup ximgproc_filters\n//! @{\n\n/** @brief Main interface for all disparity map filters.\n */\nclass CV_EXPORTS_W DisparityFilter : public Algorithm\n{\npublic:\n\n    /** @brief Apply filtering to the disparity map.\n\n    @param disparity_map_left disparity map of the left view, 1 channel, CV_16S type. Implicitly assumes that disparity\n    values are scaled by 16 (one-pixel disparity corresponds to the value of 16 in the disparity map). Disparity map\n    can have any resolution, it will be automatically resized to fit left_view resolution.\n\n    @param left_view left view of the original stereo-pair to guide the filtering process, 8-bit single-channel\n    or three-channel image.\n\n    @param filtered_disparity_map output disparity map.\n\n    @param disparity_map_right optional argument, some implementations might also use the disparity map\n    of the right view to compute confidence maps, for instance.\n\n    @param ROI region of the disparity map to filter. 
Optional, usually it should be set automatically.\n\n    @param right_view optional argument, some implementations might also use the right view of the original\n    stereo-pair.\n     */\n    CV_WRAP virtual void filter(InputArray disparity_map_left, InputArray left_view, OutputArray filtered_disparity_map, InputArray disparity_map_right = Mat(), Rect ROI = Rect(), InputArray right_view = Mat()) = 0;\n};\n\n/** @brief Disparity map filter based on Weighted Least Squares filter (in form of Fast Global Smoother that\nis a lot faster than traditional Weighted Least Squares filter implementations) and optional use of\nleft-right-consistency-based confidence to refine the results in half-occlusions and uniform areas.\n */\nclass CV_EXPORTS_W DisparityWLSFilter : public DisparityFilter\n{\npublic:\n    /** filter parameters */\n\n    /** @brief Lambda is a parameter defining the amount of regularization during filtering. Larger values force\n    filtered disparity map edges to adhere more to source image edges. Typical value is 8000.\n     */\n    CV_WRAP virtual double getLambda() = 0;\n    /** @see getLambda */\n    CV_WRAP virtual void setLambda(double _lambda) = 0;\n    /** @brief SigmaColor is a parameter defining how sensitive the filtering process is to source image edges.\n    Large values can lead to disparity leakage through low-contrast edges. Small values can make the filter too\n    sensitive to noise and textures in the source image. Typical values range from 0.8 to 2.0.\n     */\n    CV_WRAP virtual double getSigmaColor() = 0;\n    /** @see getSigmaColor */\n    CV_WRAP virtual void setSigmaColor(double _sigma_color) = 0;\n\n    /** confidence-related parameters */\n\n    /** @brief LRCthresh is a threshold of disparity difference used in left-right-consistency check during\n    confidence map computation. The default value of 24 (1.5 pixels) is virtually always good enough.\n     */\n    CV_WRAP virtual int getLRCthresh() = 0;\n    /** @see getLRCthresh */\n    CV_WRAP virtual void setLRCthresh(int _LRC_thresh) = 0;\n    /** @brief DepthDiscontinuityRadius is a parameter used in confidence computation. It defines the size of\n    low-confidence regions around depth discontinuities.\n     */\n    CV_WRAP virtual int getDepthDiscontinuityRadius() = 0;\n    /** @see getDepthDiscontinuityRadius */\n    CV_WRAP virtual void setDepthDiscontinuityRadius(int _disc_radius) = 0;\n    /** @brief Get the confidence map that was used in the last filter call. It is a CV_32F one-channel image\n    with values ranging from 0.0 (totally untrusted regions of the raw disparity map) to 255.0 (regions containing\n    correct disparity values with a high degree of confidence).\n     */\n    CV_WRAP virtual Mat getConfidenceMap() = 0;\n    /** @brief Get the ROI used in the last filter call\n     */\n    CV_WRAP virtual Rect getROI() = 0;\n};\n\n/** @brief Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant\nfilter parameters automatically based on the matcher instance. 
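A typical pipeline, sketched under the assumption that left_matcher is a StereoBM or StereoSGBM instance\nand that left_view and right_view are already rectified:\n@code\n    Mat left_disp, right_disp, filtered_disp;\n    Ptr<StereoMatcher> right_matcher = createRightMatcher(left_matcher);\n    Ptr<DisparityWLSFilter> wls_filter = createDisparityWLSFilter(left_matcher);\n    left_matcher->compute(left_view, right_view, left_disp);\n    right_matcher->compute(right_view, left_view, right_disp);\n    wls_filter->filter(left_disp, left_view, filtered_disp, right_disp);\n@endcode\n\n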
Currently supports only StereoBM and StereoSGBM.\n\n@param matcher_left stereo matcher instance that will be used with the filter\n*/\nCV_EXPORTS_W\nPtr<DisparityWLSFilter> createDisparityWLSFilter(Ptr<StereoMatcher> matcher_left);\n\n/** @brief Convenience method to set up the matcher for computing the right-view disparity map\nthat is required in case of filtering with confidence.\n\n@param matcher_left main stereo matcher instance that will be used with the filter\n*/\nCV_EXPORTS_W\nPtr<StereoMatcher> createRightMatcher(Ptr<StereoMatcher> matcher_left);\n\n/** @brief More generic factory method that creates an instance of DisparityWLSFilter and executes basic\ninitialization routines. When using this method you will need to set up the ROI, matchers and\nother parameters by yourself.\n\n@param use_confidence filtering with confidence requires two disparity maps (for the left and right views) and is\napproximately two times slower. However, quality is typically significantly better.\n*/\nCV_EXPORTS_W\nPtr<DisparityWLSFilter> createDisparityWLSFilterGeneric(bool use_confidence);\n\n//////////////////////////////////////////////////////////////////////////\n//////////////////////////////////////////////////////////////////////////\n\n/** @brief Function for reading ground truth disparity maps. Supports basic Middlebury\nand MPI-Sintel formats. Note that the resulting disparity map is scaled by 16.\n\n@param src_path path to the image containing the ground-truth disparity map\n\n@param dst output disparity map, CV_16S depth\n\n@result returns zero if the ground truth was successfully read\n */\nCV_EXPORTS\nint readGT(String src_path,OutputArray dst);\n\n/** @brief Function for computing mean square error for disparity maps\n\n@param GT ground truth disparity map\n\n@param src disparity map to evaluate\n\n@param ROI region of interest\n\n@result returns mean square error between GT and src\n */\nCV_EXPORTS\ndouble computeMSE(InputArray GT, InputArray src, Rect ROI);\n\n/** @brief Function for computing the percent of \"bad\" pixels in the disparity map\n(pixels where the error is higher than a specified threshold)\n\n@param GT ground truth disparity map\n\n@param src disparity map to evaluate\n\n@param ROI region of interest\n\n@param thresh threshold used to determine \"bad\" pixels\n\n@result returns the percent of \"bad\" pixels between GT and src\n */\nCV_EXPORTS\ndouble computeBadPixelPercent(InputArray GT, InputArray src, Rect ROI, int thresh=24/*1.5 pixels*/);\n\n/** @brief Function for creating a disparity map visualization (clamped CV_8U image)\n\n@param src input disparity map (CV_16S depth)\n\n@param dst output visualization\n\n@param scale disparity map will be multiplied by this value for visualization\n */\nCV_EXPORTS\nvoid getDisparityVis(InputArray src,OutputArray dst,double scale=1.0);\n\n//! @}\n}\n}\n#endif\n#endif\n"
  },
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/edge_filter.hpp",
    "content": "/*\n *  By downloading, copying, installing or using the software you agree to this license.\n *  If you do not agree to this license, do not download, install,\n *  copy or use the software.\n *\n *\n *  License Agreement\n *  For Open Source Computer Vision Library\n *  (3 - clause BSD License)\n *\n *  Redistribution and use in source and binary forms, with or without modification,\n *  are permitted provided that the following conditions are met :\n *\n *  *Redistributions of source code must retain the above copyright notice,\n *  this list of conditions and the following disclaimer.\n *\n *  * Redistributions in binary form must reproduce the above copyright notice,\n *  this list of conditions and the following disclaimer in the documentation\n *  and / or other materials provided with the distribution.\n *\n *  * Neither the names of the copyright holders nor the names of the contributors\n *  may be used to endorse or promote products derived from this software\n *  without specific prior written permission.\n *\n *  This software is provided by the copyright holders and contributors \"as is\" and\n *  any express or implied warranties, including, but not limited to, the implied\n *  warranties of merchantability and fitness for a particular purpose are disclaimed.\n *  In no event shall copyright holders or contributors be liable for any direct,\n *  indirect, incidental, special, exemplary, or consequential damages\n *  (including, but not limited to, procurement of substitute goods or services;\n *  loss of use, data, or profits; or business interruption) however caused\n *  and on any theory of liability, whether in contract, strict liability,\n *  or tort(including negligence or otherwise) arising in any way out of\n *  the use of this software, even if advised of the possibility of such damage.\n */\n\n#ifndef __OPENCV_EDGEFILTER_HPP__\n#define __OPENCV_EDGEFILTER_HPP__\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace ximgproc\n{\n\n//! @addtogroup ximgproc_filters\n//! @{\n\nenum EdgeAwareFiltersList\n{\n    DTF_NC,\n    DTF_IC,\n    DTF_RF,\n\n    GUIDED_FILTER,\n    AM_FILTER\n};\n\n\n/** @brief Interface for realizations of Domain Transform filter.\n\nFor more details about this filter see @cite Gastal11 .\n */\nclass CV_EXPORTS_W DTFilter : public Algorithm\n{\npublic:\n\n    /** @brief Produce domain transform filtering operation on source image.\n\n    @param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.\n\n    @param dst destination image.\n\n    @param dDepth optional depth of the output image. 
dDepth can be set to -1, which will be equivalent\n    to src.depth().\n     */\n    CV_WRAP virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;\n};\n\n/** @brief Factory method that creates an instance of DTFilter and performs the initialization routines.\n\n@param guide guided image (used to build the transformed distance, which describes the edge structure of\nthe guided image).\n\n@param sigmaSpatial \\f${\\sigma}_H\\f$ parameter in the original article, it's similar to the sigma in the\ncoordinate space of bilateralFilter.\n\n@param sigmaColor \\f${\\sigma}_r\\f$ parameter in the original article, it's similar to the sigma in the\ncolor space of bilateralFilter.\n\n@param mode one of three modes DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for\nfiltering 2D signals in the article.\n\n@param numIters optional number of iterations used for filtering, 3 is quite enough.\n\nFor more details about Domain Transform filter parameters, see the original article @cite Gastal11 and\n[Domain Transform filter homepage](http://www.inf.ufrgs.br/~eslgastal/DomainTransform/).\n */\nCV_EXPORTS_W\nPtr<DTFilter> createDTFilter(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);\n\n/** @brief Simple one-line Domain Transform filter call. If you have multiple images to filter with the same\nguided image then use the DTFilter interface to avoid extra computations on the initialization stage.\n\n@param guide guided image (also called the joint image) with unsigned 8-bit or floating-point 32-bit\ndepth and up to 4 channels.\n@param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.\n@param dst destination image.\n@param sigmaSpatial \\f${\\sigma}_H\\f$ parameter in the original article, it's similar to the sigma in the\ncoordinate space of bilateralFilter.\n@param sigmaColor \\f${\\sigma}_r\\f$ parameter in the original article, it's similar to the sigma in the\ncolor space of bilateralFilter.\n@param mode one of three modes DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for\nfiltering 2D signals in the article.\n@param numIters optional number of iterations used for filtering, 3 is quite enough.\n@sa bilateralFilter, guidedFilter, amFilter\n */\nCV_EXPORTS_W\nvoid dtFilter(InputArray guide, InputArray src, OutputArray dst, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);\n\n//////////////////////////////////////////////////////////////////////////\n//////////////////////////////////////////////////////////////////////////\n\n/** @brief Interface for realizations of Guided Filter.\n\nFor more details about this filter see @cite Kaiming10 .\n */\nclass CV_EXPORTS_W GuidedFilter : public Algorithm\n{\npublic:\n\n    /** @brief Apply Guided Filter to the filtering image.\n\n    @param src filtering image with any number of channels.\n\n    @param dst output image.\n\n    @param dDepth optional depth of the output image. dDepth can be set to -1, which will be equivalent\n    to src.depth().\n     */\n    CV_WRAP virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;\n};\n\n/** @brief Factory method that creates an instance of GuidedFilter and performs the initialization routines.\n\n@param guide guided image (or array of images) with up to 3 channels; if it has more than 3\nchannels, then only the first 3 channels will be used.\n\n@param radius radius of Guided Filter.\n\n@param eps regularization term of Guided Filter. 
\\f${eps}^2\\f$ is similar to the sigma in the color\nspace of bilateralFilter.\n\nFor more details about Guided Filter parameters, see the original article @cite Kaiming10 .\n */\nCV_EXPORTS_W Ptr<GuidedFilter> createGuidedFilter(InputArray guide, int radius, double eps);\n\n/** @brief Simple one-line Guided Filter call.\n\nIf you have multiple images to filter with the same guided image then use the GuidedFilter interface to\navoid extra computations on the initialization stage.\n\n@param guide guided image (or array of images) with up to 3 channels; if it has more than 3\nchannels, then only the first 3 channels will be used.\n\n@param src filtering image with any number of channels.\n\n@param dst output image.\n\n@param radius radius of Guided Filter.\n\n@param eps regularization term of Guided Filter. \\f${eps}^2\\f$ is similar to the sigma in the color\nspace of bilateralFilter.\n\n@param dDepth optional depth of the output image.\n\n@sa bilateralFilter, dtFilter, amFilter */\nCV_EXPORTS_W void guidedFilter(InputArray guide, InputArray src, OutputArray dst, int radius, double eps, int dDepth = -1);\n\n//////////////////////////////////////////////////////////////////////////\n//////////////////////////////////////////////////////////////////////////\n\n/** @brief Interface for Adaptive Manifold Filter realizations.\n\nFor more details about this filter see @cite Gastal12 and the References section.\n\nListed below are the optional parameters that may be set up with the Algorithm::set function.\n-   member double sigma_s = 16.0\nSpatial standard deviation.\n-   member double sigma_r = 0.2\nColor space standard deviation.\n-   member int tree_height = -1\nHeight of the manifold tree (default = -1 : automatically computed).\n-   member int num_pca_iterations = 1\nNumber of iterations used to compute the eigenvector.\n-   member bool adjust_outliers = false\nSpecify whether to adjust outliers using Eq. 
9 or not.\n-   member bool use_RNG = true\nSpecify use random number generator to compute eigenvector or not.\n */\nclass CV_EXPORTS_W AdaptiveManifoldFilter : public Algorithm\n{\npublic:\n    /** @brief Apply high-dimensional filtering using adaptive manifolds.\n\n    @param src filtering image with any numbers of channels.\n\n    @param dst output image.\n\n    @param joint optional joint (also called as guided) image with any numbers of channels.\n     */\n    CV_WRAP virtual void filter(InputArray src, OutputArray dst, InputArray joint = noArray()) = 0;\n\n    CV_WRAP virtual void collectGarbage() = 0;\n\n    CV_WRAP static Ptr<AdaptiveManifoldFilter> create();\n\n    /** @see setSigmaS */\n    virtual double getSigmaS() const = 0;\n    /** @copybrief getSigmaS @see getSigmaS */\n    virtual void setSigmaS(double val) = 0;\n    /** @see setSigmaR */\n    virtual double getSigmaR() const = 0;\n    /** @copybrief getSigmaR @see getSigmaR */\n    virtual void setSigmaR(double val) = 0;\n    /** @see setTreeHeight */\n    virtual int getTreeHeight() const = 0;\n    /** @copybrief getTreeHeight @see getTreeHeight */\n    virtual void setTreeHeight(int val) = 0;\n    /** @see setPCAIterations */\n    virtual int getPCAIterations() const = 0;\n    /** @copybrief getPCAIterations @see getPCAIterations */\n    virtual void setPCAIterations(int val) = 0;\n    /** @see setAdjustOutliers */\n    virtual bool getAdjustOutliers() const = 0;\n    /** @copybrief getAdjustOutliers @see getAdjustOutliers */\n    virtual void setAdjustOutliers(bool val) = 0;\n    /** @see setUseRNG */\n    virtual bool getUseRNG() const = 0;\n    /** @copybrief getUseRNG @see getUseRNG */\n    virtual void setUseRNG(bool val) = 0;\n};\n\n/** @brief Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.\n\n@param sigma_s spatial standard deviation.\n\n@param sigma_r color space standard deviation, it is similar to the sigma in the color space into\nbilateralFilter.\n\n@param adjust_outliers optional, specify perform outliers adjust operation or not, (Eq. 9) in the\noriginal paper.\n\nFor more details about Adaptive Manifold Filter parameters, see the original article @cite Gastal12 .\n\n@note Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1]\ncolor range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same\nsigmas in bilateralFilter and dtFilter functions.\n*/\nCV_EXPORTS_W Ptr<AdaptiveManifoldFilter> createAMFilter(double sigma_s, double sigma_r, bool adjust_outliers = false);\n\n/** @brief Simple one-line Adaptive Manifold Filter call.\n\n@param joint joint (also called as guided) image or array of images with any numbers of channels.\n\n@param src filtering image with any numbers of channels.\n\n@param dst output image.\n\n@param sigma_s spatial standard deviation.\n\n@param sigma_r color space standard deviation, it is similar to the sigma in the color space into\nbilateralFilter.\n\n@param adjust_outliers optional, specify perform outliers adjust operation or not, (Eq. 9) in the\noriginal paper.\n\n@note Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1]\ncolor range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same\nsigmas in bilateralFilter and dtFilter functions. 
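A one-line usage sketch (assuming 8-bit src and joint images; note the [0; 1] sigma_r range discussed\nabove):\n@code\n    Mat src, joint, dst;\n    // ... fill src and joint ...\n    amFilter(joint, src, dst, 16.0, 0.2); // sigma_s = 16.0, sigma_r = 0.2\n@endcode\n\n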
@sa bilateralFilter, dtFilter, guidedFilter\n*/\nCV_EXPORTS_W void amFilter(InputArray joint, InputArray src, OutputArray dst, double sigma_s, double sigma_r, bool adjust_outliers = false);\n\n//////////////////////////////////////////////////////////////////////////\n//////////////////////////////////////////////////////////////////////////\n\n/** @brief Applies the joint bilateral filter to an image.\n\n@param joint Joint 8-bit or floating-point, 1-channel or 3-channel image.\n\n@param src Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint\nimage.\n\n@param dst Destination image of the same size and type as src .\n\n@param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,\nit is computed from sigmaSpace .\n\n@param sigmaColor Filter sigma in the color space. A larger value of the parameter means that\nfarther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in\nlarger areas of semi-equal color.\n\n@param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that\nfarther pixels will influence each other as long as their colors are close enough (see sigmaColor ).\nWhen d\\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is\nproportional to sigmaSpace .\n\n@param borderType\n\n@note bilateralFilter and jointBilateralFilter use L1 norm to compute difference between colors.\n\n@sa bilateralFilter, amFilter\n*/\nCV_EXPORTS_W\nvoid jointBilateralFilter(InputArray joint, InputArray src, OutputArray dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT);\n\n//////////////////////////////////////////////////////////////////////////\n//////////////////////////////////////////////////////////////////////////\n\n/** @brief Applies the rolling guidance filter to an image.\n\n@param src Source 8-bit or floating-point, 1-channel or 3-channel image.\n\n@param dst Destination image of the same size and type as src.\n\n@param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,\nit is computed from sigmaSpace .\n\n@param sigmaColor Filter sigma in the color space. A larger value of the parameter means that\nfarther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in\nlarger areas of semi-equal color.\n\n@param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that\nfarther pixels will influence each other as long as their colors are close enough (see sigmaColor ).\nWhen d\\>0 , it specifies the neighborhood size regardless of sigmaSpace . 
Otherwise, d is\nproportional to sigmaSpace .\n\n@param numOfIter Number of iterations of joint edge-preserving filtering applied on the source image.\n\n@param borderType border mode used to extrapolate pixels outside of the image, see cv::BorderTypes\n\n@note rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.\n\n@sa jointBilateralFilter, bilateralFilter, amFilter\n*/\nCV_EXPORTS_W\nvoid rollingGuidanceFilter(InputArray src, OutputArray dst, int d = -1, double sigmaColor = 25, double sigmaSpace = 3, int numOfIter = 4, int borderType = BORDER_DEFAULT);\n\n//////////////////////////////////////////////////////////////////////////\n//////////////////////////////////////////////////////////////////////////\n\n\n/** @brief Interface for implementations of the Fast Global Smoother filter.\n\nFor more details about this filter see @cite Min2014 and @cite Farbman2008 .\n*/\nclass CV_EXPORTS_W FastGlobalSmootherFilter : public Algorithm\n{\npublic:\n    /** @brief Apply smoothing operation to the source image.\n\n    @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.\n\n    @param dst destination image.\n    */\n    CV_WRAP virtual void filter(InputArray src, OutputArray dst) = 0;\n};\n\n/** @brief Factory method that creates an instance of FastGlobalSmootherFilter and executes the initialization routines.\n\n@param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.\n\n@param lambda parameter defining the amount of regularization.\n\n@param sigma_color parameter similar to the color space sigma in bilateralFilter.\n\n@param lambda_attenuation internal parameter defining how much lambda decreases after each iteration. Normally,\nit should be 0.25. Setting it to 1.0 may lead to streaking artifacts.\n\n@param num_iter number of iterations used for filtering, 3 is usually enough.\n\nFor more details about Fast Global Smoother parameters, see the original paper @cite Min2014. However, please note that\nthere are several differences. Lambda attenuation described in the paper is implemented a bit differently, so do not\nexpect the results to be identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to\nachieve the same effect. Also, in the case of image filtering where the source and guide image are the same, the authors\npropose to dynamically update the guide image after each iteration. To maximize performance, this feature\nwas not implemented here.\n*/\nCV_EXPORTS_W Ptr<FastGlobalSmootherFilter> createFastGlobalSmootherFilter(InputArray guide, double lambda, double sigma_color, double lambda_attenuation=0.25, int num_iter=3);\n\n/** @brief Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same\nguide then use the FastGlobalSmootherFilter interface to avoid extra computations.\n\n@param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.\n\n@param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.\n\n@param dst destination image.\n\n@param lambda parameter defining the amount of regularization.\n\n@param sigma_color parameter similar to the color space sigma in bilateralFilter.\n\n@param lambda_attenuation internal parameter defining how much lambda decreases after each iteration. Normally,\nit should be 0.25. 
Setting it to 1.0 may lead to streaking artifacts.\n\n@param num_iter number of iterations used for filtering, 3 is usually enough.\n*/\nCV_EXPORTS_W void fastGlobalSmootherFilter(InputArray guide, InputArray src, OutputArray dst, double lambda, double sigma_color, double lambda_attenuation=0.25, int num_iter=3);\n\n/** @brief Global image smoothing via L0 gradient minimization.\n\n@param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.\n\n@param dst destination image.\n\n@param lambda parameter defining the smooth term weight.\n\n@param kappa parameter defining the increasing factor of the weight of the gradient data term.\n\nFor more details about L0 Smoother, see the original paper @cite xu2011image.\n*/\nCV_EXPORTS_W void l0Smooth(InputArray src, OutputArray dst, double lambda = 0.02, double kappa = 2.0);\n//! @}\n}\n}\n#endif\n#endif\n"
  },
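  {
    "path": "docs/sketches/edge_filter_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file name,\n// input path and parameter values are illustrative assumptions. It drives\n// amFilter() and createFastGlobalSmootherFilter() as declared in\n// edge_filter.hpp above.\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/ximgproc.hpp>\n\nint main()\n{\n    cv::Mat src = cv::imread(\"input.png\");  // assumed 8-bit BGR image\n    if (src.empty()) return 1;\n\n    // One-line adaptive manifold filter call. For 8-bit inputs sigma_r must be\n    // in [0; 1], since the image is converted to CV_32F (see the @note above).\n    cv::Mat am;\n    cv::ximgproc::amFilter(src, src, am, 16.0, 0.2);\n\n    // When several images share one guide, reuse a FastGlobalSmootherFilter\n    // instance so the guide-dependent work is done only once.\n    cv::Ptr<cv::ximgproc::FastGlobalSmootherFilter> fgs =\n        cv::ximgproc::createFastGlobalSmootherFilter(src, 100.0, 4.0);\n    cv::Mat smoothed;\n    fgs->filter(src, smoothed);\n\n    cv::imwrite(\"am.png\", am);\n    cv::imwrite(\"fgs.png\", smoothed);\n    return 0;\n}\n"
  },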
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/estimated_covariance.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this license.\nIf you do not agree to this license, do not download, install,\ncopy or use the software.\n\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2000-2015, Intel Corporation, all rights reserved.\nCopyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\nCopyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.\nCopyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.\nCopyright (C) 2015, OpenCV Foundation, all rights reserved.\nCopyright (C) 2015, Itseez Inc., all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are disclaimed.\nIn no event shall copyright holders or contributors be liable for any direct,\nindirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n\nAlgorithmic details of this algorithm can be found at:\n * O. Green, Y. Birk, \"A Computationally Efficient Algorithm for the 2D Covariance Method\", ACM/IEEE International Conference on High Performance Computing, Networking, Storage and Analysis, Denver, Colorado, 2013\nA previous and less efficient version of the algorithm can be found:\n * O. Green, L. David, A. Galperin, Y. Birk, \"Efficient parallel computation of the estimated covariance matrix\", arXiv, 2013\n\n\n*/\n#ifndef __OPENCV_ESTIMATECOVARIANCE_HPP__\n#define __OPENCV_ESTIMATECOVARIANCE_HPP__\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace ximgproc\n{\n\n/** @brief Computes the estimated covariance matrix of an image using the sliding\nwindow forumlation.\n\n@param src The source image. Input image must be of a complex type.\n@param dst The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).\n@param windowRows The number of rows in the window.\n@param windowCols The number of cols in the window.\nThe window size parameters control the accuracy of the estimation.\nThe sliding window moves over the entire image from the top-left corner\nto the bottom right corner. 
Each location of the window represents a sample.\nIf the window is the size of the image, then this gives the exact covariance matrix.\nFor all other cases, the sizes of the window will impact the number of samples\nand the number of elements in the estimated covariance matrix.\n*/\n\nCV_EXPORTS_W void covarianceEstimation(InputArray src, OutputArray dst, int windowRows, int windowCols);\n\n}\n}\n#endif\n#endif\n"
  },
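  {
    "path": "docs/sketches/covariance_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file name,\n// input path and the 7x7 window are illustrative assumptions. Since\n// covarianceEstimation() expects a complex-typed input (see above), a zero\n// imaginary plane is merged in before the call.\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/ximgproc.hpp>\n#include <vector>\n\nint main()\n{\n    cv::Mat gray = cv::imread(\"input.png\", cv::IMREAD_GRAYSCALE);\n    if (gray.empty()) return 1;\n\n    cv::Mat re;\n    gray.convertTo(re, CV_32F);\n    std::vector<cv::Mat> planes;\n    planes.push_back(re);\n    planes.push_back(cv::Mat::zeros(re.size(), CV_32F));\n    cv::Mat complexSrc;\n    cv::merge(planes, complexSrc);  // CV_32FC2: real + imaginary plane\n\n    // Output is (7*7) x (7*7); a larger window yields fewer samples but a\n    // bigger estimated covariance matrix (see the parameter notes above).\n    cv::Mat cov;\n    cv::ximgproc::covarianceEstimation(complexSrc, cov, 7, 7);\n    return 0;\n}\n"
  },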
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/fast_hough_transform.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2015, Smart Engines Ltd, all rights reserved.\n// Copyright (C) 2015, Institute for Information Transmission Problems of the Russian Academy of Sciences (Kharkevich Institute), all rights reserved.\n// Copyright (C) 2015, Dmitry Nikolaev, Simon Karpenko, Michail Aliev, Elena Kuznetsova, all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_FAST_HOUGH_TRANSFORM_HPP__\n#define __OPENCV_FAST_HOUGH_TRANSFORM_HPP__\n#ifdef __cplusplus\n\n#include \"opencv2/core.hpp\"\n\n\nnamespace cv { namespace ximgproc {\n\n/**\n* @brief   Specifies the part of Hough space to calculate\n* @details The enum specifies the part of Hough space to calculate. 
Each\n* member specifies the primary direction of lines (horizontal or vertical)\n* and the direction of angle changes.\n* The direction of angle changes runs from multiples of 90 to odd multiples of 45 degrees.\n* The image is considered to be written top-down and left-to-right.\n* Angles start from the vertical line and go clockwise.\n* Separate quarters and halves are written in the orientation they would occupy in the\n* full Hough space.\n*/\nenum AngleRangeOption\n{\n  ARO_0_45    = 0, //< Primarily vertical direction with clockwise angle changes\n  ARO_45_90   = 1, //< Primarily horizontal direction with counterclockwise angle changes\n  ARO_90_135  = 2, //< Primarily horizontal direction with clockwise angle changes\n  ARO_315_0   = 3, //< Primarily vertical direction with counterclockwise angle changes\n  ARO_315_45  = 4, //< Primarily vertical direction\n  ARO_45_135  = 5, //< Primarily horizontal direction\n  ARO_315_135 = 6, //< Full set of directions\n  ARO_CTR_HOR = 7, //< 90 +/- atan(0.5), interval approximately from 64.5 to 116.5 degrees.\n                   //< It is used for calculating the Fast Hough Transform for images skewed by atan(0.5).\n  ARO_CTR_VER = 8  //< +/- atan(0.5), interval approximately from 333.5(-26.5) to 26.5 degrees.\n                   //< It is used for calculating the Fast Hough Transform for images skewed by atan(0.5).\n};\n\n/**\n * @brief   Specifies binary operations.\n * @details The enum specifies binary operations, that is, operations which involve\n *          two operands. Formally, a binary operation @f$ f @f$ on a set @f$ S @f$\n *          is a binary relation that maps elements of the Cartesian product\n *          @f$ S \\times S @f$ to @f$ S @f$:\n *          @f[ f: S \\times S \\to S @f]\n * @ingroup MinUtils_MathOper\n */\nenum HoughOp\n{\n  FHT_MIN = 0,  //< Binary minimum operation. The constant specifies the binary minimum operation\n                //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = \\min(x, y) @f]\n  FHT_MAX = 1,  //< Binary maximum operation. The constant specifies the binary maximum operation\n                //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = \\max(x, y) @f]\n  FHT_ADD = 2,  //< Binary addition operation. The constant specifies the binary addition operation\n                //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = x + y @f]\n  FHT_AVE = 3   //< Binary average operation. The constant specifies the binary average operation\n                //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = \\frac{x + y}{2} @f]\n};\n\n/**\n* @brief   Specifies whether to skew the Hough transform image\n* @details The enum specifies whether to skew the Hough transform image\n* so that there is no cycling through the image borders in the Hough transform image.\n*/\nenum HoughDeskewOption\n{\n  HDO_RAW    = 0, //< Use the raw cyclic image\n  HDO_DESKEW = 1  //< Prepare the deskewed image\n};\n\n/**\n * @brief   Specifies the degree of rules validation.\n * @details The enum specifies the degree of rules validation. 
This can be used,\n *          for example, to choose a proper way of validating the input arguments.\n */\ntypedef enum {\n  RO_STRICT          = 0x00,  ///< Validate each rule in a proper way.\n  RO_IGNORE_BORDERS  = 0x01,  ///< Skip validations of image borders.\n} RulesOption;\n\n/**\n* @brief   Calculates the 2D Fast Hough transform of an image.\n* @param   dst         The destination image, result of transformation.\n* @param   src         The source (input) image.\n* @param   dstMatDepth The depth of the destination image\n* @param   op          The operation to be applied, see cv::HoughOp\n* @param   angleRange  The part of Hough space to calculate, see cv::AngleRangeOption\n* @param   makeSkew    Specifies whether to do image skewing, see cv::HoughDeskewOption\n*\n* The function calculates the fast Hough transform for full, half or quarter\n* range of angles.\n*/\nCV_EXPORTS void FastHoughTransform( InputArray  src,\n                                    OutputArray dst,\n                                    int         dstMatDepth,\n                                    int         angleRange = ARO_315_135,\n                                    int         op = FHT_ADD,\n                                    int         makeSkew = HDO_DESKEW );\n\n/**\n* @brief   Calculates the coordinates of the line segment corresponding to a point in Hough space.\n* @param   houghPoint  Point in Hough space.\n* @param   srcImgInfo  The source (input) image of the Hough transform.\n* @param   angleRange  The part of Hough space where the point is situated, see cv::AngleRangeOption\n* @param   makeSkew    Specifies whether to do image skewing, see cv::HoughDeskewOption\n* @param   rules       Specifies the strictness of the line segment calculation, see cv::RulesOption\n* @retval  [Vec4i]     Coordinates of the line segment corresponding to the point in Hough space.\n* @remarks If the rules parameter is set to RO_STRICT,\n           the returned line is cut along the border of the source image.\n* @remarks If the rules parameter is set to RO_WEAK then, for a point belonging to\n           the incorrect part of the Hough image, the returned line will not intersect the source image.\n*\n* The function calculates the coordinates of the line segment corresponding to a point in Hough space.\n*/\nCV_EXPORTS Vec4i HoughPoint2Line(const Point &houghPoint,\n                                 InputArray  srcImgInfo,\n                                 int         angleRange = ARO_315_135,\n                                 int         makeSkew = HDO_DESKEW,\n                                 int         rules = RO_IGNORE_BORDERS );\n\n} }// namespace cv::ximgproc\n\n#endif //__cplusplus\n#endif //__OPENCV_FAST_HOUGH_TRANSFORM_HPP__\n"
  },
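  {
    "path": "docs/sketches/fast_hough_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file name and\n// input path are illustrative assumptions. It runs the full-range, deskewed\n// Fast Hough transform declared above and maps the strongest accumulator bin\n// back to a line segment with HoughPoint2Line().\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/ximgproc/fast_hough_transform.hpp>\n\nint main()\n{\n    cv::Mat edges = cv::imread(\"edges.png\", cv::IMREAD_GRAYSCALE);  // assumed binary edge map\n    if (edges.empty()) return 1;\n\n    cv::Mat hough;\n    cv::ximgproc::FastHoughTransform(edges, hough, CV_32S,\n                                     cv::ximgproc::ARO_315_135,\n                                     cv::ximgproc::FHT_ADD,\n                                     cv::ximgproc::HDO_DESKEW);\n\n    // The brightest accumulator bin corresponds to the strongest line.\n    cv::Point maxLoc;\n    cv::minMaxLoc(hough, 0, 0, 0, &maxLoc);\n    cv::Vec4i segment = cv::ximgproc::HoughPoint2Line(maxLoc, edges,\n                                                      cv::ximgproc::ARO_315_135,\n                                                      cv::ximgproc::HDO_DESKEW);\n    (void)segment;  // endpoints of the detected line in the source image\n    return 0;\n}\n"
  },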
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/lsc.hpp",
    "content": "/*********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright (c) 2014, 2015\n * Zhengqin Li <li-zq12 at mails dot tsinghua dot edu dot cn>\n * Jiansheng Chen <jschenthu at mail dot tsinghua dot edu dot cn>\n * Tsinghua University\n *\n *  Redistribution and use in source and binary forms, with or without\n *  modification, are permitted provided that the following conditions\n *  are met:\n *\n *   * Redistributions of source code must retain the above copyright\n *     notice, this list of conditions and the following disclaimer.\n *   * Redistributions in binary form must reproduce the above\n *     copyright notice, this list of conditions and the following\n *     disclaimer in the documentation and/or other materials provided\n *     with the distribution.\n *   * Neither the name of the copyright holders nor the names of its\n *     contributors may be used to endorse or promote products derived\n *     from this software without specific prior written permission.\n *\n *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n *  \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n *  POSSIBILITY OF SUCH DAMAGE.\n *********************************************************************/\n\n/*\n\n \"Superpixel Segmentation using Linear Spectral Clustering\"\n Zhengqin Li, Jiansheng Chen, IEEE Conference on Computer Vision and Pattern\n Recognition (CVPR), Jun. 2015\n\n OpenCV port by: Cristian Balint <cristian dot balint at gmail dot com>\n */\n\n#ifndef __OPENCV_LSC_HPP__\n#define __OPENCV_LSC_HPP__\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace ximgproc\n{\n\n//! @addtogroup ximgproc_superpixel\n//! @{\n\n/** @brief Class implementing the LSC (Linear Spectral Clustering) superpixels\nalgorithm described in @cite LiCVPR2015LSC.\n\nLSC (Linear Spectral Clustering) produces compact and uniform superpixels with low\ncomputational costs. Basically, a normalized cuts formulation of the superpixel\nsegmentation is adopted based on a similarity metric that measures the color\nsimilarity and space proximity between image pixels. LSC is of linear computational\ncomplexity and high memory efficiency and is able to preserve global properties of images\n\n */\n\nclass CV_EXPORTS_W SuperpixelLSC : public Algorithm\n{\npublic:\n\n    /** @brief Calculates the actual amount of superpixels on a given segmentation computed\n    and stored in SuperpixelLSC object.\n     */\n    CV_WRAP virtual int getNumberOfSuperpixels() const = 0;\n\n    /** @brief Calculates the superpixel segmentation on a given image with the initialized\n    parameters in the SuperpixelLSC object.\n\n    This function can be called again without the need of initializing the algorithm with\n    createSuperpixelLSC(). 
This saves the computational cost of allocating memory for all the\n    structures of the algorithm.\n\n    @param num_iterations Number of iterations. A higher number improves the result.\n\n    The function computes the superpixel segmentation of an image with the parameters initialized\n    with the function createSuperpixelLSC(). The algorithm starts from a grid of superpixels and\n    then refines the boundaries by proposing updates of the edge boundaries.\n\n     */\n    CV_WRAP virtual void iterate( int num_iterations = 10 ) = 0;\n\n    /** @brief Returns the segmentation labeling of the image.\n\n    Each label represents a superpixel, and each pixel is assigned to one superpixel label.\n\n    @param labels_out Return: A CV_32SC1 integer array containing the labels of the superpixel\n    segmentation. The labels are in the range [0, getNumberOfSuperpixels()].\n\n    The function returns an image with the labels of the superpixel segmentation. The labels are in\n    the range [0, getNumberOfSuperpixels()].\n     */\n    CV_WRAP virtual void getLabels( OutputArray labels_out ) const = 0;\n\n    /** @brief Returns the mask of the superpixel segmentation stored in SuperpixelLSC object.\n\n    @param image Return: CV_8UC1 image mask where -1 indicates that the pixel is a superpixel border,\n    and 0 otherwise.\n\n    @param thick_line If false, the border is only one pixel wide, otherwise all pixels at the border\n    are masked.\n\n    The function returns the boundaries of the superpixel segmentation.\n     */\n    CV_WRAP virtual void getLabelContourMask( OutputArray image, bool thick_line = true ) const = 0;\n\n    /** @brief Enforce label connectivity.\n\n    @param min_element_size The minimum element size, in percent of the average superpixel size, that\n    should be absorbed into a bigger superpixel. Valid values are in the 0-100 range; for example, 25\n    means that a component smaller than a quarter of the average superpixel size is absorbed.\n\n    The function merges components that are too small, assigning the previously found adjacent label\n    to them. Calling this function may change the final number of superpixels.\n     */\n    CV_WRAP virtual void enforceLabelConnectivity( int min_element_size = 20 ) = 0;\n\n\n};\n\n/** @brief Initializes a SuperpixelLSC object for the LSC (Linear Spectral Clustering) superpixels algorithm.\n\n@param image Image to segment\n@param region_size Chooses an average superpixel size measured in pixels\n@param ratio Chooses the enforcement of the superpixel compactness factor\n\nThe function initializes a SuperpixelLSC object for the input image. It sets the parameters of the\nsuperpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future\ncomputing iterations over the given image. An example of LSC is illustrated in the following picture.\nFor enhanced results it is recommended to preprocess color images with a little Gaussian blur\nusing a small 3 x 3 kernel and an additional conversion into the CIELAB color space.\n\n![image](pics/superpixels_lsc.png)\n\n */\n\n    CV_EXPORTS_W Ptr<SuperpixelLSC> createSuperpixelLSC( InputArray image, int region_size = 10, float ratio = 0.075f );\n\n//! @}\n\n}\n}\n#endif\n#endif\n"
  },
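  {
    "path": "docs/sketches/lsc_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file name,\n// input path and parameter values are illustrative assumptions. It follows the\n// preprocessing advice from lsc.hpp above (light Gaussian blur plus a CIELAB\n// conversion) before running the superpixel iterations.\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/ximgproc/lsc.hpp>\n\nint main()\n{\n    cv::Mat img = cv::imread(\"input.png\");\n    if (img.empty()) return 1;\n\n    cv::Mat lab;\n    cv::GaussianBlur(img, img, cv::Size(3, 3), 0);\n    cv::cvtColor(img, lab, cv::COLOR_BGR2Lab);\n\n    cv::Ptr<cv::ximgproc::SuperpixelLSC> lsc =\n        cv::ximgproc::createSuperpixelLSC(lab, 16, 0.075f);\n    lsc->iterate(10);                   // refine superpixel boundaries\n    lsc->enforceLabelConnectivity(20);  // absorb tiny components\n\n    cv::Mat labels, contours;\n    lsc->getLabels(labels);              // CV_32SC1 label map\n    lsc->getLabelContourMask(contours);  // superpixel boundaries\n    return 0;\n}\n"
  },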
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/seeds.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2014, Beat Kueng (beat-kueng@gmx.net), Lukas Vogel, Morten Lysgaard\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_SEEDS_HPP__\n#define __OPENCV_SEEDS_HPP__\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace ximgproc\n{\n\n//! @addtogroup ximgproc_superpixel\n//! @{\n\n/** @brief Class implementing the SEEDS (Superpixels Extracted via Energy-Driven Sampling) superpixels\nalgorithm described in @cite VBRV14 .\n\nThe algorithm uses an efficient hill-climbing algorithm to optimize the superpixels' energy\nfunction that is based on color histograms and a boundary term, which is optional. The energy\nfunction encourages superpixels to be of the same color, and if the boundary term is activated, the\nsuperpixels have smooth boundaries and are of similar shape. In practice it starts from a regular\ngrid of superpixels and moves the pixels or blocks of pixels at the boundaries to refine the\nsolution. 
The algorithm runs in real-time using a single CPU.\n */\nclass CV_EXPORTS_W SuperpixelSEEDS : public Algorithm\n{\npublic:\n\n    /** @brief Calculates the actual amount of superpixels on a given segmentation computed\n    and stored in SuperpixelSEEDS object.\n     */\n    CV_WRAP virtual int getNumberOfSuperpixels() = 0;\n\n    /** @brief Calculates the superpixel segmentation on a given image with the initialized\n    parameters in the SuperpixelSEEDS object.\n\n    This function can be called again for other images without the need of initializing the\n    algorithm with createSuperpixelSEEDS(). This saves the computational cost of allocating memory\n    for all the structures of the algorithm.\n\n    @param img Input image. Supported formats: CV_8U, CV_16U, CV_32F. Image size and number of\n    channels must match the image size and channels used at initialization with the function\n    createSuperpixelSEEDS(). It should be in HSV or Lab color space. Lab is a bit better, but also\n    slower.\n\n    @param num_iterations Number of pixel level iterations. A higher number improves the result.\n\n    The function computes the superpixel segmentation of an image with the parameters initialized\n    with the function createSuperpixelSEEDS(). The algorithm starts from a grid of superpixels and\n    then refines the boundaries by proposing updates of blocks of pixels that lie at the boundaries,\n    from large to smaller sizes, finalizing with pixel-level updates. An illustrative example\n    can be seen below.\n\n    ![image](pics/superpixels_blocks2.png)\n     */\n    CV_WRAP virtual void iterate(InputArray img, int num_iterations=4) = 0;\n\n    /** @brief Returns the segmentation labeling of the image.\n\n    Each label represents a superpixel, and each pixel is assigned to one superpixel label.\n\n    @param labels_out Return: A CV_32UC1 integer array containing the labels of the superpixel\n    segmentation. The labels are in the range [0, getNumberOfSuperpixels()].\n\n    The function returns an image with the labels of the superpixel segmentation. The labels are in\n    the range [0, getNumberOfSuperpixels()].\n     */\n    CV_WRAP virtual void getLabels(OutputArray labels_out) = 0;\n\n    /** @brief Returns the mask of the superpixel segmentation stored in SuperpixelSEEDS object.\n\n    @param image Return: CV_8UC1 image mask where -1 indicates that the pixel is a superpixel border,\n    and 0 otherwise.\n\n    @param thick_line If false, the border is only one pixel wide, otherwise all pixels at the border\n    are masked.\n\n    The function returns the boundaries of the superpixel segmentation.\n\n    @note\n        -   (Python) A demo on how to generate superpixels in images from the webcam can be found at\n            opencv_source_code/samples/python2/seeds.py\n        -   (cpp) A demo on how to generate superpixels in images from the webcam can be found at\n            opencv_source_code/modules/ximgproc/samples/seeds.cpp. By adding an image file as a command\n            line argument, the static image will be used instead of the webcam.\n        -   It will show a window with the video from the webcam with the superpixel boundaries marked\n            in red (see below). Use Space to switch between different output modes. 
At the top of the\n            window there are 4 sliders, from which the user can change on-the-fly the number of\n            superpixels, the number of block levels, the strength of the boundary prior term to modify\n            the shape, and the number of iterations at pixel level. This is useful for playing with the\n            parameters and setting them to the user's convenience. The frame rate of the\n            algorithm is indicated in the console.\n\n    ![image](pics/superpixels_demo.png)\n     */\n    CV_WRAP virtual void getLabelContourMask(OutputArray image, bool thick_line = false) = 0;\n\n    virtual ~SuperpixelSEEDS() {}\n};\n\n/** @brief Initializes a SuperpixelSEEDS object.\n\n@param image_width Image width.\n@param image_height Image height.\n@param image_channels Number of channels of the image.\n@param num_superpixels Desired number of superpixels. Note that the actual number may be smaller\ndue to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to\nget the actual number.\n@param num_levels Number of block levels. The more levels, the more accurate the segmentation,\nbut the more memory and CPU time are needed.\n@param prior enables the 3x3 shape smoothing term if \\>0. A larger value leads to smoother shapes. prior\nmust be in the range [0, 5].\n@param histogram_bins Number of histogram bins.\n@param double_step If true, iterate each block level twice for higher accuracy.\n\nThe function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of\nthe image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS\nsuperpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and\ndouble_step.\n\nThe number of levels in num_levels defines the number of block levels that the algorithm uses in the\noptimization. The initialization is a grid, in which the superpixels are equally distributed through\nthe width and the height of the image. The larger blocks correspond to the superpixel size, and the\nlevels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,\nrecursively until the smallest block level. An example of initialization of 4 block levels is\nillustrated in the following figure.\n\n![image](pics/superpixels_blocks.png)\n */\nCV_EXPORTS_W Ptr<SuperpixelSEEDS> createSuperpixelSEEDS(\n    int image_width, int image_height, int image_channels,\n    int num_superpixels, int num_levels, int prior = 2,\n    int histogram_bins=5, bool double_step = false);\n\n//! @}\n\n}\n}\n#endif\n#endif\n"
  },
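  {
    "path": "docs/sketches/seeds_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file name,\n// input path and parameter values are illustrative assumptions. SEEDS is\n// initialized from fixed image dimensions, after which iterate() can be fed\n// any image of that size; Lab input is recommended by the docs above.\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/ximgproc/seeds.hpp>\n\nint main()\n{\n    cv::Mat img = cv::imread(\"input.png\");\n    if (img.empty()) return 1;\n\n    cv::Mat lab;\n    cv::cvtColor(img, lab, cv::COLOR_BGR2Lab);\n\n    cv::Ptr<cv::ximgproc::SuperpixelSEEDS> seeds =\n        cv::ximgproc::createSuperpixelSEEDS(img.cols, img.rows, img.channels(),\n                                            400, 4, 2, 5, false);\n    seeds->iterate(lab, 4);  // pixel-level refinement passes\n\n    cv::Mat labels, contours;\n    seeds->getLabels(labels);\n    seeds->getLabelContourMask(contours, false);\n    // The actual superpixel count may be below the requested 400:\n    int n = seeds->getNumberOfSuperpixels();\n    (void)n;\n    return 0;\n}\n"
  },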
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/segmentation.hpp",
    "content": "/*\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n*/\n\n#ifndef __OPENCV_XIMGPROC_SEGMENTATION_HPP__\n#define __OPENCV_XIMGPROC_SEGMENTATION_HPP__\n\n#include <opencv2/core.hpp>\n\nnamespace cv {\n    namespace ximgproc {\n        namespace segmentation {\n            //! @addtogroup ximgproc_segmentation\n            //! @{\n\n                    /** @brief Graph Based Segmentation Algorithm.\n                        The class implements the algorithm described in @cite PFF2004 .\n                     */\n                    class CV_EXPORTS_W GraphSegmentation : public Algorithm {\n                        public:\n                            /** @brief Segment an image and store output in dst\n                                @param src The input image. Any number of channel (1 (Eg: Gray), 3 (Eg: RGB), 4 (Eg: RGB-D)) can be provided\n                                @param dst The output segmentation. 
It is a CV_32SC1 Mat with the same number of columns and rows as the input image, with a unique, sequential id for each pixel.\n                            */\n                            CV_WRAP virtual void processImage(InputArray src, OutputArray dst) = 0;\n\n                            CV_WRAP virtual void setSigma(double sigma) = 0;\n                            CV_WRAP virtual double getSigma() = 0;\n\n                            CV_WRAP virtual void setK(float k) = 0;\n                            CV_WRAP virtual float getK() = 0;\n\n                            CV_WRAP virtual void setMinSize(int min_size) = 0;\n                            CV_WRAP virtual int getMinSize() = 0;\n                    };\n\n                    /** @brief Creates a graph-based segmenter\n                        @param sigma The sigma parameter, used to smooth the image\n                        @param k The k parameter of the algorithm\n                        @param min_size The minimum size of segments\n                     */\n                    CV_EXPORTS_W Ptr<GraphSegmentation> createGraphSegmentation(double sigma=0.5, float k=300, int min_size=100);\n            //! @}\n\n            // Represents an edge between two pixels\n            class Edge {\n                public:\n                    int from;\n                    int to;\n                    float weight;\n\n                    bool operator <(const Edge& e) const {\n                        return weight < e.weight;\n                    }\n            };\n\n            // A point in the sets of points\n            class PointSetElement {\n                public:\n                    int p;\n                    int size;\n\n                    PointSetElement() { }\n\n                    PointSetElement(int p_) {\n                        p = p_;\n                        size = 1;\n                    }\n            };\n\n            // An object to manage sets of points, which can be merged\n            class PointSet {\n                public:\n                    PointSet(int nb_elements_);\n                    ~PointSet();\n\n                    int nb_elements;\n\n                    // Return the main point of the point's set\n                    int getBasePoint(int p);\n\n                    // Join two sets of points, based on their main point\n                    void joinPoints(int p_a, int p_b);\n\n                    // Return the size of a set (based on the main point)\n                    int size(unsigned int p) { return mapping[p].size; }\n\n                private:\n                    PointSetElement* mapping;\n\n            };\n\n        }\n    }\n}\n\n#endif\n"
  },
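  {
    "path": "docs/sketches/graph_segmentation_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file name,\n// input path and parameter values are illustrative assumptions. It runs the\n// graph-based segmentation declared in segmentation.hpp above.\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/ximgproc/segmentation.hpp>\n\nint main()\n{\n    cv::Mat img = cv::imread(\"input.png\");\n    if (img.empty()) return 1;\n\n    // sigma smooths the image, k controls the merging threshold and min_size\n    // is the smallest segment kept (the declared defaults, written explicitly).\n    cv::Ptr<cv::ximgproc::segmentation::GraphSegmentation> gs =\n        cv::ximgproc::segmentation::createGraphSegmentation(0.5, 300.0f, 100);\n\n    cv::Mat segments;  // CV_32SC1; one sequential id per pixel\n    gs->processImage(img, segments);\n\n    double minId = 0, maxId = 0;\n    cv::minMaxLoc(segments, &minId, &maxId);\n    // maxId + 1 is the number of segments produced.\n    return 0;\n}\n"
  },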
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/slic.hpp",
    "content": "/*********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright (c) 2013\n * Radhakrishna Achanta\n * email : Radhakrishna [dot] Achanta [at] epfl [dot] ch\n * web : http://ivrl.epfl.ch/people/achanta\n *\n *  Redistribution and use in source and binary forms, with or without\n *  modification, are permitted provided that the following conditions\n *  are met:\n *\n *   * Redistributions of source code must retain the above copyright\n *     notice, this list of conditions and the following disclaimer.\n *   * Redistributions in binary form must reproduce the above\n *     copyright notice, this list of conditions and the following\n *     disclaimer in the documentation and/or other materials provided\n *     with the distribution.\n *   * Neither the name of the copyright holders nor the names of its\n *     contributors may be used to endorse or promote products derived\n *     from this software without specific prior written permission.\n *\n *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n *  \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n *  POSSIBILITY OF SUCH DAMAGE.\n *********************************************************************/\n\n/*\n \"SLIC Superpixels Compared to State-of-the-art Superpixel Methods\"\n Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua,\n and Sabine Susstrunk, IEEE TPAMI, Volume 34, Issue 11, Pages 2274-2282,\n November 2012.\n\n \"SLIC Superpixels\" Radhakrishna Achanta, Appu Shaji, Kevin Smith,\n Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, EPFL Technical\n Report no. 149300, June 2010.\n\n OpenCV port by: Cristian Balint <cristian dot balint at gmail dot com>\n */\n\n#ifndef __OPENCV_SLIC_HPP__\n#define __OPENCV_SLIC_HPP__\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace ximgproc\n{\n\n//! @addtogroup ximgproc_superpixel\n//! @{\n\n/** @brief Class implementing the SLIC (Simple Linear Iterative Clustering) superpixels\nalgorithm described in @cite Achanta2012.\n\nSLIC (Simple Linear Iterative Clustering) clusters pixels using pixel channels and image plane space\nto efficiently generate compact, nearly uniform superpixels. 
The simplicity of the approach makes it\nextremely easy to use: a lone parameter specifies the number of superpixels, and the efficiency of\nthe algorithm makes it very practical.\n\n */\n\nclass CV_EXPORTS_W SuperpixelSLIC : public Algorithm\n{\npublic:\n\n    /** @brief Calculates the actual amount of superpixels on a given segmentation computed\n    and stored in SuperpixelSLIC object.\n     */\n    CV_WRAP virtual int getNumberOfSuperpixels() const = 0;\n\n    /** @brief Calculates the superpixel segmentation on a given image with the initialized\n    parameters in the SuperpixelSLIC object.\n\n    This function can be called again without the need of initializing the algorithm with\n    createSuperpixelSLIC(). This saves the computational cost of allocating memory for all the\n    structures of the algorithm.\n\n    @param num_iterations Number of iterations. A higher number improves the result.\n\n    The function computes the superpixel segmentation of an image with the parameters initialized\n    with the function createSuperpixelSLIC(). The algorithm starts from a grid of superpixels and\n    then refines the boundaries by proposing updates of the edge boundaries.\n\n     */\n    CV_WRAP virtual void iterate( int num_iterations = 10 ) = 0;\n\n    /** @brief Returns the segmentation labeling of the image.\n\n    Each label represents a superpixel, and each pixel is assigned to one superpixel label.\n\n    @param labels_out Return: A CV_32SC1 integer array containing the labels of the superpixel\n    segmentation. The labels are in the range [0, getNumberOfSuperpixels()].\n\n    The function returns an image with the labels of the superpixel segmentation. The labels are in\n    the range [0, getNumberOfSuperpixels()].\n     */\n    CV_WRAP virtual void getLabels( OutputArray labels_out ) const = 0;\n\n    /** @brief Returns the mask of the superpixel segmentation stored in SuperpixelSLIC object.\n\n    @param image Return: CV_8UC1 image mask where -1 indicates that the pixel is a superpixel border,\n    and 0 otherwise.\n\n    @param thick_line If false, the border is only one pixel wide, otherwise all pixels at the border\n    are masked.\n\n    The function returns the boundaries of the superpixel segmentation.\n     */\n    CV_WRAP virtual void getLabelContourMask( OutputArray image, bool thick_line = true ) const = 0;\n\n    /** @brief Enforce label connectivity.\n\n    @param min_element_size The minimum element size, in percent of the average superpixel size, that\n    should be absorbed into a bigger superpixel. Valid values are in the 0-100 range; 25 means that a\n    component smaller than a quarter of the average superpixel size is absorbed (this is the default).\n\n    The function merges components that are too small, assigning the previously found adjacent label\n    to them. Calling this function may change the final number of superpixels.\n     */\n    CV_WRAP virtual void enforceLabelConnectivity( int min_element_size = 25 ) = 0;\n\n\n};\n\n/** @brief Initializes a SuperpixelSLIC object for the SLIC (Simple Linear Iterative Clustering) superpixels algorithm.\n\n@param image Image to segment\n@param algorithm Chooses the algorithm variant to use:\nSLIC segments the image using a desired region_size, while\nSLICO will additionally choose an adaptive compactness factor.\n@param region_size Chooses an average superpixel size measured in pixels\n@param ruler Chooses the enforcement of the superpixel smoothness factor\n\nThe function initializes a SuperpixelSLIC object for the input image. 
It sets the parameters of the chosen\nsuperpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future\ncomputing iterations over the given image. An example of SLIC versus SLICO is illustrated in the\nfollowing picture.\n\n![image](pics/slic_slico_kermit.png)\n\n */\n\n    enum SLIC { SLIC = 100, SLICO = 101 };\n\n    CV_EXPORTS_W Ptr<SuperpixelSLIC> createSuperpixelSLIC( InputArray image, int algorithm = SLICO,\n                                                           int region_size = 10, float ruler = 10.0f );\n\n//! @}\n\n}\n}\n#endif\n#endif\n"
  },
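  {
    "path": "docs/sketches/slic_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file name,\n// input path and parameter values are illustrative assumptions. The SLICO\n// variant is used, which adapts the compactness factor on its own so only\n// region_size really needs tuning.\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/ximgproc/slic.hpp>\n\nint main()\n{\n    cv::Mat img = cv::imread(\"input.png\");\n    if (img.empty()) return 1;\n\n    cv::Mat lab;\n    cv::cvtColor(img, lab, cv::COLOR_BGR2Lab);\n\n    cv::Ptr<cv::ximgproc::SuperpixelSLIC> slic =\n        cv::ximgproc::createSuperpixelSLIC(lab, cv::ximgproc::SLICO, 20, 10.0f);\n    slic->iterate(10);\n    slic->enforceLabelConnectivity(25);\n\n    cv::Mat labels, contours;\n    slic->getLabels(labels);\n    slic->getLabelContourMask(contours);\n    return 0;\n}\n"
  },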
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/sparse_match_interpolator.hpp",
    "content": "/*\n *  By downloading, copying, installing or using the software you agree to this license.\n *  If you do not agree to this license, do not download, install,\n *  copy or use the software.\n *\n *\n *  License Agreement\n *  For Open Source Computer Vision Library\n *  (3 - clause BSD License)\n *\n *  Redistribution and use in source and binary forms, with or without modification,\n *  are permitted provided that the following conditions are met :\n *\n *  *Redistributions of source code must retain the above copyright notice,\n *  this list of conditions and the following disclaimer.\n *\n *  * Redistributions in binary form must reproduce the above copyright notice,\n *  this list of conditions and the following disclaimer in the documentation\n *  and / or other materials provided with the distribution.\n *\n *  * Neither the names of the copyright holders nor the names of the contributors\n *  may be used to endorse or promote products derived from this software\n *  without specific prior written permission.\n *\n *  This software is provided by the copyright holders and contributors \"as is\" and\n *  any express or implied warranties, including, but not limited to, the implied\n *  warranties of merchantability and fitness for a particular purpose are disclaimed.\n *  In no event shall copyright holders or contributors be liable for any direct,\n *  indirect, incidental, special, exemplary, or consequential damages\n *  (including, but not limited to, procurement of substitute goods or services;\n *  loss of use, data, or profits; or business interruption) however caused\n *  and on any theory of liability, whether in contract, strict liability,\n *  or tort(including negligence or otherwise) arising in any way out of\n *  the use of this software, even if advised of the possibility of such damage.\n */\n\n#ifndef __OPENCV_SPARSEMATCHINTERPOLATOR_HPP__\n#define __OPENCV_SPARSEMATCHINTERPOLATOR_HPP__\n#ifdef __cplusplus\n\n#include <opencv2/core.hpp>\n\nnamespace cv {\nnamespace ximgproc {\n\n//! @addtogroup ximgproc_filters\n//! @{\n\n/** @brief Main interface for all filters, that take sparse matches as an\ninput and produce a dense per-pixel matching (optical flow) as an output.\n */\nclass CV_EXPORTS_W SparseMatchInterpolator : public Algorithm\n{\npublic:\n    /** @brief Interpolate input sparse matches.\n\n    @param from_image first of the two matched images, 8-bit single-channel or three-channel.\n\n    @param from_points points of the from_image for which there are correspondences in the\n    to_image (Point2f vector, size shouldn't exceed 32767)\n\n    @param to_image second of the two matched images, 8-bit single-channel or three-channel.\n\n    @param to_points points in the to_image corresponding to from_points\n    (Point2f vector, size shouldn't exceed 32767)\n\n    @param dense_flow output dense matching (two-channel CV_32F image)\n     */\n    CV_WRAP virtual void interpolate(InputArray from_image, InputArray from_points,\n                                     InputArray to_image  , InputArray to_points,\n                                     OutputArray dense_flow) = 0;\n};\n\n/** @brief Sparse match interpolation algorithm based on modified locally-weighted affine\nestimator from @cite Revaud2015 and Fast Global Smoother as post-processing filter.\n */\nclass CV_EXPORTS_W EdgeAwareInterpolator : public SparseMatchInterpolator\n{\npublic:\n    /** @brief K is a number of nearest-neighbor matches considered, when fitting a locally affine\n    model. 
Usually it should be around 128. However, lower values would make the interpolation\n    noticeably faster.\n     */\n    CV_WRAP virtual void setK(int _k) = 0;\n    /** @see setK */\n    CV_WRAP virtual int  getK() = 0;\n\n    /** @brief Sigma is a parameter defining how fast the weights decrease in the locally-weighted affine\n    fitting. Higher values can help preserve fine details, lower values can help to get rid of noise in the\n    output flow.\n     */\n    CV_WRAP virtual void  setSigma(float _sigma) = 0;\n    /** @see setSigma */\n    CV_WRAP virtual float getSigma() = 0;\n\n    /** @brief Lambda is a parameter defining the weight of the edge-aware term in the geodesic distance;\n    it should be in the range of 0 to 1000.\n     */\n    CV_WRAP virtual void  setLambda(float _lambda) = 0;\n    /** @see setLambda */\n    CV_WRAP virtual float getLambda() = 0;\n\n    /** @brief Sets whether the fastGlobalSmootherFilter() post-processing is employed. It is turned on by\n    default.\n     */\n    CV_WRAP virtual void setUsePostProcessing(bool _use_post_proc) = 0;\n    /** @see setUsePostProcessing */\n    CV_WRAP virtual bool getUsePostProcessing() = 0;\n\n    /** @brief Sets the respective fastGlobalSmootherFilter() parameter.\n     */\n    CV_WRAP virtual void  setFGSLambda(float _lambda) = 0;\n    /** @see setFGSLambda */\n    CV_WRAP virtual float getFGSLambda() = 0;\n\n    /** @brief Sets the respective fastGlobalSmootherFilter() parameter.\n     */\n    CV_WRAP virtual void  setFGSSigma(float _sigma) = 0;\n    /** @see setFGSSigma */\n    CV_WRAP virtual float getFGSSigma() = 0;\n};\n\n/** @brief Factory method that creates an instance of the\nEdgeAwareInterpolator.\n*/\nCV_EXPORTS_W\nPtr<EdgeAwareInterpolator> createEdgeAwareInterpolator();\n\n//! @}\n}\n}\n#endif\n#endif\n"
  },
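  {
    "path": "docs/sketches/sparse_match_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file names and\n// the hand-made correspondences are illustrative assumptions standing in for a\n// real sparse matcher. Point counts must stay below 32767 (see above).\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/ximgproc/sparse_match_interpolator.hpp>\n#include <vector>\n\nint main()\n{\n    cv::Mat a = cv::imread(\"frame0.png\", cv::IMREAD_GRAYSCALE);\n    cv::Mat b = cv::imread(\"frame1.png\", cv::IMREAD_GRAYSCALE);\n    if (a.empty() || b.empty()) return 1;\n\n    std::vector<cv::Point2f> from, to;\n    from.push_back(cv::Point2f(10.f, 10.f));  to.push_back(cv::Point2f(12.f, 11.f));\n    from.push_back(cv::Point2f(50.f, 40.f));  to.push_back(cv::Point2f(53.f, 41.f));\n    from.push_back(cv::Point2f(120.f, 80.f)); to.push_back(cv::Point2f(122.f, 82.f));\n\n    cv::Ptr<cv::ximgproc::EdgeAwareInterpolator> eai =\n        cv::ximgproc::createEdgeAwareInterpolator();\n    eai->setK(128);                   // neighbors per locally affine fit\n    eai->setUsePostProcessing(true);  // fastGlobalSmootherFilter() cleanup\n\n    cv::Mat flow;  // dense two-channel CV_32F matching\n    eai->interpolate(a, from, b, to, flow);\n    return 0;\n}\n"
  },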
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc/structured_edge_detection.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_STRUCTURED_EDGE_DETECTION_HPP__\n#define __OPENCV_STRUCTURED_EDGE_DETECTION_HPP__\n#ifdef __cplusplus\n\n/** @file\n@date Jun 17, 2014\n@author Yury Gitman\n */\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace ximgproc\n{\n\n//! @addtogroup ximgproc_edge\n//! @{\n\n/*!\n  Helper class for training part of [P. Dollar and C. L. Zitnick. 
Structured Forests for Fast Edge Detection, 2013].\n */\nclass CV_EXPORTS_W RFFeatureGetter : public Algorithm\n{\npublic:\n\n    /*!\n     * This function extracts feature channels from src.\n     * StructuredEdgeDetection then uses this feature space\n     * to detect edges.\n     *\n     * \\param src : source image to extract features from\n     * \\param features : output n-channel floating point feature matrix.\n     *\n     * \\param gnrmRad : __rf.options.gradientNormalizationRadius\n     * \\param gsmthRad : __rf.options.gradientSmoothingRadius\n     * \\param shrink : __rf.options.shrinkNumber\n     * \\param outNum : __rf.options.numberOfOutputChannels\n     * \\param gradNum : __rf.options.numberOfGradientOrientations\n     */\n    CV_WRAP virtual void getFeatures(const Mat &src, Mat &features,\n                                     const int gnrmRad,\n                                     const int gsmthRad,\n                                     const int shrink,\n                                     const int outNum,\n                                     const int gradNum) const = 0;\n};\n\nCV_EXPORTS_W Ptr<RFFeatureGetter> createRFFeatureGetter();\n\n\n\n/** @brief Class implementing the edge detection algorithm from @cite Dollar2013 :\n */\nclass CV_EXPORTS_W StructuredEdgeDetection : public Algorithm\n{\npublic:\n\n    /** @brief The function detects edges in src and draws them to dst.\n\n    The algorithm underlying this function is much more robust to the presence of texture than common\n    approaches, e.g. Sobel.\n    @param src source image (RGB, float, in [0;1]) to detect edges in\n    @param dst destination image (grayscale, float, in [0;1]) where edges are drawn\n    @sa Sobel, Canny\n     */\n    CV_WRAP virtual void detectEdges(const Mat &src, CV_OUT Mat &dst) const = 0;\n};\n\n/*!\n* The only constructor\n*\n* \\param model : name of the file where the model is stored\n* \\param howToGetFeatures : optional object inheriting from RFFeatureGetter.\n*                           You need it only if you would like to train your\n*                           own forest, pass NULL otherwise\n*/\nCV_EXPORTS_W Ptr<StructuredEdgeDetection> createStructuredEdgeDetection(const String &model,\n    Ptr<const RFFeatureGetter> howToGetFeatures = Ptr<RFFeatureGetter>());\n\n//! @}\n\n}\n}\n#endif\n#endif /* __OPENCV_STRUCTURED_EDGE_DETECTION_HPP__ */\n"
  },
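  {
    "path": "docs/sketches/structured_edges_usage.cpp",
    "content": "// Hypothetical usage sketch, not part of the original tree: the file names are\n// illustrative and \"model.yml.gz\" is a placeholder for a pretrained structured\n// forest model. detectEdges() expects RGB float data in [0; 1] (see the docs\n// above), hence the conversions below.\n#include <opencv2/core.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/ximgproc/structured_edge_detection.hpp>\n\nint main()\n{\n    cv::Mat bgr = cv::imread(\"input.png\");\n    if (bgr.empty()) return 1;\n\n    cv::Mat rgb, srcF;\n    cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB);\n    rgb.convertTo(srcF, CV_32FC3, 1.0 / 255.0);\n\n    cv::Ptr<cv::ximgproc::StructuredEdgeDetection> sed =\n        cv::ximgproc::createStructuredEdgeDetection(\"model.yml.gz\");\n\n    cv::Mat edges;  // grayscale float edge map in [0; 1]\n    sed->detectEdges(srcF, edges);\n    return 0;\n}\n"
  },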
  {
    "path": "src/3rdparty/opencv/include/opencv2/ximgproc.hpp",
    "content": "/*\n *  By downloading, copying, installing or using the software you agree to this license.\n *  If you do not agree to this license, do not download, install,\n *  copy or use the software.\n *  \n *  \n *  License Agreement\n *  For Open Source Computer Vision Library\n *  (3 - clause BSD License)\n *  \n *  Redistribution and use in source and binary forms, with or without modification,\n *  are permitted provided that the following conditions are met :\n *  \n *  *Redistributions of source code must retain the above copyright notice,\n *  this list of conditions and the following disclaimer.\n *  \n *  * Redistributions in binary form must reproduce the above copyright notice,\n *  this list of conditions and the following disclaimer in the documentation\n *  and / or other materials provided with the distribution.\n *  \n *  * Neither the names of the copyright holders nor the names of the contributors\n *  may be used to endorse or promote products derived from this software\n *  without specific prior written permission.\n *  \n *  This software is provided by the copyright holders and contributors \"as is\" and\n *  any express or implied warranties, including, but not limited to, the implied\n *  warranties of merchantability and fitness for a particular purpose are disclaimed.\n *  In no event shall copyright holders or contributors be liable for any direct,\n *  indirect, incidental, special, exemplary, or consequential damages\n *  (including, but not limited to, procurement of substitute goods or services;\n *  loss of use, data, or profits; or business interruption) however caused\n *  and on any theory of liability, whether in contract, strict liability,\n *  or tort(including negligence or otherwise) arising in any way out of\n *  the use of this software, even if advised of the possibility of such damage.\n */\n\n#ifndef __OPENCV_XIMGPROC_HPP__\n#define __OPENCV_XIMGPROC_HPP__\n\n#include \"ximgproc/edge_filter.hpp\"\n#include \"ximgproc/disparity_filter.hpp\"\n#include \"ximgproc/sparse_match_interpolator.hpp\"\n#include \"ximgproc/structured_edge_detection.hpp\"\n#include \"ximgproc/seeds.hpp\"\n#include \"ximgproc/segmentation.hpp\"\n#include \"ximgproc/fast_hough_transform.hpp\"\n#include \"ximgproc/estimated_covariance.hpp\"\n#include \"ximgproc/slic.hpp\"\n#include \"ximgproc/lsc.hpp\"\n\n/** @defgroup ximgproc Extended Image Processing\n  @{\n    @defgroup ximgproc_edge Structured forests for fast edge detection\n\nThis module contains implementations of modern structured edge detection algorithms, i.e. algorithms\nwhich somehow takes into account pixel affinities in natural images.\n\n    @defgroup ximgproc_filters Filters\n\n    @defgroup ximgproc_superpixel Superpixels\n\n    @defgroup ximgproc_segmentation Image segmentation\n  @}\n*/\n\nnamespace cv {\nnamespace ximgproc {\n    CV_EXPORTS_W\n    void niBlackThreshold( InputArray _src, OutputArray _dst, double maxValue,\n            int type, int blockSize, double delta );\n\n} // namespace ximgproc\n} //namespace cv\n\n#endif\n"
  },
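  {
    "path": "docs/examples/niblack_threshold_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): calling the\n// niBlackThreshold() declared in ximgproc.hpp. The parameter values are\n// illustrative; delta is assumed to act as the k factor in Niblack's\n// T = mean + k * stddev rule.\n#include <opencv2/opencv.hpp>\n#include <opencv2/ximgproc.hpp>\n\nint main()\n{\n    cv::Mat gray = cv::imread(\"page.png\", cv::IMREAD_GRAYSCALE);\n    cv::Mat bin;\n    // 25x25 local window; a negative factor suits dark text on light paper.\n    cv::ximgproc::niBlackThreshold(gray, bin, 255, cv::THRESH_BINARY, 25, -0.2);\n    cv::imwrite(\"binarized.png\", bin);\n    return 0;\n}\n"
  },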
  {
    "path": "src/3rdparty/opencv/include/opencv2/xobjdetect.hpp",
    "content": "/*\n\nBy downloading, copying, installing or using the software you agree to this\nlicense. If you do not agree to this license, do not download, install,\ncopy or use the software.\n\n\n                          License Agreement\n               For Open Source Computer Vision Library\n                       (3-clause BSD License)\n\nCopyright (C) 2013, OpenCV Foundation, all rights reserved.\nThird party copyrights are property of their respective owners.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n  * Redistributions of source code must retain the above copyright notice,\n    this list of conditions and the following disclaimer.\n\n  * Redistributions in binary form must reproduce the above copyright notice,\n    this list of conditions and the following disclaimer in the documentation\n    and/or other materials provided with the distribution.\n\n  * Neither the names of the copyright holders nor the names of the contributors\n    may be used to endorse or promote products derived from this software\n    without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are\ndisclaimed. In no event shall copyright holders or contributors be liable for\nany direct, indirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n\n*/\n\n#ifndef __OPENCV_XOBJDETECT_XOBJDETECT_HPP__\n#define __OPENCV_XOBJDETECT_XOBJDETECT_HPP__\n\n#include <opencv2/core.hpp>\n#include <opencv2/highgui.hpp>\n#include <vector>\n#include <string>\n\n/** @defgroup xobjdetect Extended object detection\n*/\nnamespace cv\n{\nnamespace xobjdetect\n{\n//! @addtogroup xobjdetect\n//! @{\n\n\n/** @brief WaldBoost detector\n*/\nclass CV_EXPORTS WBDetector {\npublic:\n    /** @brief Read detector from FileNode.\n    @param node FileNode for input\n    */\n    virtual void read(const FileNode &node) = 0;\n\n    /** @brief Write detector to FileStorage.\n    @param fs FileStorage for output\n    */\n    virtual void write(FileStorage &fs) const = 0;\n\n    /** @brief Train WaldBoost detector\n    @param pos_samples Path to directory with cropped positive samples\n    @param neg_imgs Path to directory with negative (background) images\n    */\n    virtual void train(\n        const std::string& pos_samples,\n        const std::string& neg_imgs) = 0;\n\n    /** @brief Detect objects on image using WaldBoost detector\n    @param img Input image for detection\n    @param bboxes Bounding boxes coordinates output vector\n    @param confidences Confidence values for bounding boxes output vector\n    */\n    virtual void detect(\n        const Mat& img,\n        std::vector<Rect> &bboxes,\n        std::vector<double> &confidences) = 0;\n\n    /** @brief Create instance of WBDetector\n    */\n    static Ptr<WBDetector> create();\n\n    virtual ~WBDetector(){}\n};\n\n\n//! 
@}\n\n} /* namespace xobjdetect */\n} /* namespace cv */\n\n#endif /* __OPENCV_XOBJDETECT_XOBJDETECT_HPP__ */\n"
  },
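  {
    "path": "docs/examples/wbdetector_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): the train/save/detect\n// cycle of the WBDetector declared in xobjdetect.hpp. Directory, file and\n// node names are placeholders.\n#include <opencv2/opencv.hpp>\n#include <opencv2/xobjdetect.hpp>\n#include <vector>\n\nint main()\n{\n    cv::Ptr<cv::xobjdetect::WBDetector> detector = cv::xobjdetect::WBDetector::create();\n    detector->train(\"pos_samples/\", \"neg_images/\"); // cropped positives, background images\n\n    cv::FileStorage fs(\"wbdetector.xml\", cv::FileStorage::WRITE);\n    fs << \"waldboost\";\n    detector->write(fs);                            // persist the trained model\n    fs.release();\n\n    cv::Mat img = cv::imread(\"scene.png\", cv::IMREAD_GRAYSCALE);\n    std::vector<cv::Rect> boxes;\n    std::vector<double> confidences;\n    detector->detect(img, boxes, confidences);\n    for (size_t i = 0; i < boxes.size(); ++i)\n        cv::rectangle(img, boxes[i], cv::Scalar(255));\n    cv::imwrite(\"detections.png\", img);\n    return 0;\n}\n"
  },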
  {
    "path": "src/3rdparty/opencv/include/opencv2/xphoto/dct_image_denoising.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_DCT_IMAGE_DENOISING_HPP__\n#define __OPENCV_DCT_IMAGE_DENOISING_HPP__\n\n/** @file\n@date Jun 26, 2014\n@author Yury Gitman\n*/\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace xphoto\n{\n\n//! @addtogroup xphoto\n//! @{\n\n    /** @brief The function implements simple dct-based denoising\n\n    <http://www.ipol.im/pub/art/2011/ys-dct/>.\n    @param src source image\n    @param dst destination image\n    @param sigma expected noise standard deviation\n    @param psize size of block side where dct is computed\n\n    @sa\n       fastNlMeansDenoising\n     */\n    CV_EXPORTS_W void dctDenoising(const Mat &src, Mat &dst, const double sigma, const int psize = 16);\n\n//! @}\n\n}\n}\n\n#endif // __OPENCV_DCT_IMAGE_DENOISING_HPP__\n"
  },
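  {
    "path": "docs/examples/dct_denoising_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): calling\n// xphoto::dctDenoising() declared above. sigma is illustrative; psize = 16\n// just repeats the declared default.\n#include <opencv2/opencv.hpp>\n#include <opencv2/xphoto.hpp>\n\nint main()\n{\n    cv::Mat noisy = cv::imread(\"noisy.png\");\n    cv::Mat clean;\n    cv::xphoto::dctDenoising(noisy, clean, 15.0, 16); // sigma = expected noise std. dev.\n    cv::imwrite(\"denoised.png\", clean);\n    return 0;\n}\n"
  },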
  {
    "path": "src/3rdparty/opencv/include/opencv2/xphoto/inpainting.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_INPAINTING_HPP__\n#define __OPENCV_INPAINTING_HPP__\n\n/** @file\n@date Jul 22, 2014\n@author Yury Gitman\n*/\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace xphoto\n{\n\n//! @addtogroup xphoto\n//! @{\n\n    //! various inpainting algorithms\n    enum InpaintTypes\n    {\n        /** This algorithm searches for dominant correspondences (transformations) of\n        image patches and tries to seamlessly fill-in the area to be inpainted using this\n        transformations */\n        INPAINT_SHIFTMAP = 0\n    };\n\n    /** @brief The function implements different single-image inpainting algorithms.\n\n    See the original paper @cite He2012 for details.\n\n    @param src source image, it could be of any type and any number of channels from 1 to 4. In case of\n    3- and 4-channels images the function expect them in CIELab colorspace or similar one, where first\n    color component shows intensity, while second and third shows colors. 
Nonetheless, you can try any\n    colorspace.\n    @param mask mask (CV_8UC1), where non-zero pixels indicate valid image area, while zero pixels\n    indicate the area to be inpainted\n    @param dst destination image\n    @param algorithmType see xphoto::InpaintTypes\n    */\n    CV_EXPORTS_W void inpaint(const Mat &src, const Mat &mask, Mat &dst, const int algorithmType);\n\n//! @}\n\n}\n}\n\n#endif // __OPENCV_INPAINTING_HPP__\n"
  },
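  {
    "path": "docs/examples/inpainting_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): calling xphoto::inpaint()\n// declared above. Per its docs a 3-channel input should be in a Lab-like\n// colorspace, so we convert before and after; file names are placeholders.\n#include <opencv2/opencv.hpp>\n#include <opencv2/xphoto.hpp>\n\nint main()\n{\n    cv::Mat src  = cv::imread(\"damaged.png\");\n    cv::Mat mask = cv::imread(\"mask.png\", cv::IMREAD_GRAYSCALE); // zero marks the area to fill\n\n    cv::Mat lab, res;\n    cv::cvtColor(src, lab, cv::COLOR_BGR2Lab);\n    cv::xphoto::inpaint(lab, mask, res, cv::xphoto::INPAINT_SHIFTMAP);\n    cv::cvtColor(res, res, cv::COLOR_Lab2BGR);\n    cv::imwrite(\"restored.png\", res);\n    return 0;\n}\n"
  },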
  {
    "path": "src/3rdparty/opencv/include/opencv2/xphoto/white_balance.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_SIMPLE_COLOR_BALANCE_HPP__\n#define __OPENCV_SIMPLE_COLOR_BALANCE_HPP__\n\n/** @file\n@date Jun 26, 2014\n@author Yury Gitman\n*/\n\n#include <opencv2/core.hpp>\n\nnamespace cv\n{\nnamespace xphoto\n{\n\n//! @addtogroup xphoto\n//! @{\n\n    //! various white balance algorithms\n    enum WhitebalanceTypes\n    {\n        /** perform smart histogram adjustments (ignoring 4% pixels with minimal and maximal\n        values) for each channel */\n        WHITE_BALANCE_SIMPLE = 0,\n        WHITE_BALANCE_GRAYWORLD = 1\n    };\n\n    /** @brief The function implements different algorithm of automatic white balance,\n\n    i.e. 
it tries to map the image's white color to perceptual white (this can be violated due to\n    specific illumination or camera settings).\n\n    @param src source image\n    @param dst destination image\n    @param algorithmType see xphoto::WhitebalanceTypes\n    @param inputMin minimum value in the input image\n    @param inputMax maximum value in the input image\n    @param outputMin minimum value in the output image\n    @param outputMax maximum value in the output image\n    @sa cvtColor, equalizeHist\n     */\n    CV_EXPORTS_W void balanceWhite(const Mat &src, Mat &dst, const int algorithmType,\n        const float inputMin  = 0.0f, const float inputMax  = 255.0f,\n        const float outputMin = 0.0f, const float outputMax = 255.0f);\n\n    /** @brief Implements a simple grayworld white balance algorithm.\n\n    The function autowbGrayworld scales the values of pixels based on a\n    gray-world assumption which states that the average of all channels\n    should result in a gray image.\n\n    This function adds a modification which thresholds pixels based on their\n    saturation value and only uses pixels below the provided threshold in\n    finding average pixel values.\n\n    Saturation is calculated using the following for a 3-channel RGB image per\n    pixel I and is in the range [0, 1]:\n\n    \\f[ \\texttt{Saturation} [I] = \\frac{\\textrm{max}(R,G,B) - \\textrm{min}(R,G,B)\n    }{\\textrm{max}(R,G,B)} \\f]\n\n    A threshold of 1 means that all pixels are used to white-balance, while a\n    threshold of 0 means no pixels are used. Lower thresholds are useful in\n    white-balancing saturated images.\n\n    Currently only works on images of type @ref CV_8UC3.\n\n    @param src Input array.\n    @param dst Output array of the same size and type as src.\n    @param thresh Maximum saturation for a pixel to be included in the\n        gray-world assumption.\n\n    @sa balanceWhite\n     */\n    CV_EXPORTS_W void autowbGrayworld(InputArray src, OutputArray dst,\n        float thresh = 0.5f);\n\n//! @}\n\n}\n}\n\n#endif // __OPENCV_SIMPLE_COLOR_BALANCE_HPP__\n"
  },
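  {
    "path": "docs/examples/white_balance_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): the two white-balance\n// entry points declared in white_balance.hpp. The 0.5f threshold just makes\n// the declared default explicit.\n#include <opencv2/opencv.hpp>\n#include <opencv2/xphoto.hpp>\n\nint main()\n{\n    cv::Mat src = cv::imread(\"photo.png\"); // 8-bit, 3-channel\n    cv::Mat simple, grayworld;\n\n    // Histogram-based stretch over the default [0;255] input/output ranges.\n    cv::xphoto::balanceWhite(src, simple, cv::xphoto::WHITE_BALANCE_SIMPLE);\n\n    // Gray-world assumption, ignoring pixels with saturation above 0.5.\n    cv::xphoto::autowbGrayworld(src, grayworld, 0.5f);\n\n    cv::imwrite(\"wb_simple.png\", simple);\n    cv::imwrite(\"wb_grayworld.png\", grayworld);\n    return 0;\n}\n"
  },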
  {
    "path": "src/3rdparty/opencv/include/opencv2/xphoto.hpp",
    "content": "/*M///////////////////////////////////////////////////////////////////////////////////////\n//\n//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n//\n//  By downloading, copying, installing or using the software you agree to this license.\n//  If you do not agree to this license, do not download, install,\n//  copy or use the software.\n//\n//\n//                           License Agreement\n//                For Open Source Computer Vision Library\n//\n// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\n// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\n// Third party copyrights are property of their respective owners.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n//   * Redistribution's of source code must retain the above copyright notice,\n//     this list of conditions and the following disclaimer.\n//\n//   * Redistribution's in binary form must reproduce the above copyright notice,\n//     this list of conditions and the following disclaimer in the documentation\n//     and/or other materials provided with the distribution.\n//\n//   * The name of the copyright holders may not be used to endorse or promote products\n//     derived from this software without specific prior written permission.\n//\n// This software is provided by the copyright holders and contributors \"as is\" and\n// any express or implied warranties, including, but not limited to, the implied\n// warranties of merchantability and fitness for a particular purpose are disclaimed.\n// In no event shall the Intel Corporation or contributors be liable for any direct,\n// indirect, incidental, special, exemplary, or consequential damages\n// (including, but not limited to, procurement of substitute goods or services;\n// loss of use, data, or profits; or business interruption) however caused\n// and on any theory of liability, whether in contract, strict liability,\n// or tort (including negligence or otherwise) arising in any way out of\n// the use of this software, even if advised of the possibility of such damage.\n//\n//M*/\n\n#ifndef __OPENCV_XPHOTO_HPP__\n#define __OPENCV_XPHOTO_HPP__\n\n/** @defgroup xphoto Additional photo processing algorithms\n*/\n\n#include \"xphoto/inpainting.hpp\"\n#include \"xphoto/white_balance.hpp\"\n#include \"xphoto/dct_image_denoising.hpp\"\n#endif\n"
  },
  {
    "path": "src/3rdparty/packtpub/BGRAVideoFrame.h",
    "content": "/*****************************************************************************\n*   BGRAVideoFrame.h\n*   Example_MarkerBasedAR\n******************************************************************************\n*   by Khvedchenia Ievgen, 5th Dec 2012\n*   http://computer-vision-talks.com\n******************************************************************************\n*   Ch2 of the book \"Mastering OpenCV with Practical Computer Vision Projects\"\n*   Copyright Packt Publishing 2012.\n*   http://www.packtpub.com/cool-projects-with-opencv/book\n*****************************************************************************/\n\n#ifndef Example_MarkerBasedAR_BGRAVideoFrame_h\n#define Example_MarkerBasedAR_BGRAVideoFrame_h\n\n#include <cstddef>\n\n// A helper struct presenting interleaved BGRA image in memory.\nstruct BGRAVideoFrame\n{\n    size_t width;\n    size_t height;\n    size_t stride;\n    \n    unsigned char * data;\n};\n\n\n#endif\n"
  },
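  {
    "path": "docs/examples/bgra_videoframe_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): filling a BGRAVideoFrame\n// from a cv::Mat so it can be handed to code that consumes the struct. The\n// struct does not own the pixels, so the Mat must outlive the frame.\n#include <opencv2/opencv.hpp>\n#include \"BGRAVideoFrame.h\"\n\nBGRAVideoFrame wrapMat(cv::Mat& bgra) // bgra must be CV_8UC4\n{\n    BGRAVideoFrame frame;\n    frame.width  = bgra.cols;\n    frame.height = bgra.rows;\n    frame.stride = bgra.step;   // bytes per row, including any padding\n    frame.data   = bgra.data;   // no copy: the struct aliases the Mat's buffer\n    return frame;\n}\n"
  },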
  {
    "path": "src/3rdparty/packtpub/CameraCalibration.hpp",
    "content": "/*****************************************************************************\n*   CameraCalibration.hpp\n*   Example_MarkerBasedAR\n******************************************************************************\n*   by Khvedchenia Ievgen, 5th Dec 2012\n*   http://computer-vision-talks.com\n******************************************************************************\n*   Ch2 of the book \"Mastering OpenCV with Practical Computer Vision Projects\"\n*   Copyright Packt Publishing 2012.\n*   http://www.packtpub.com/cool-projects-with-opencv/book\n*****************************************************************************/\n\n#ifndef Example_MarkerBasedAR_CameraCalibration_hpp\n#define Example_MarkerBasedAR_CameraCalibration_hpp\n\n////////////////////////////////////////////////////////////////////\n// File includes:\n#include \"GeometryTypes.hpp\"\n\n/**\n * A camera calibraiton class that stores intrinsic matrix\n * and distorsion vector.\n */\nclass CameraCalibration\n{\npublic:\n  CameraCalibration();\n  CameraCalibration(float fx, float fy, float cx, float cy);\n  CameraCalibration(float fx, float fy, float cx, float cy, float distorsionCoeff[4]);\n  \n  void getMatrix34(float cparam[3][4]) const;\n\n  const Matrix33& getIntrinsic() const;\n  const Vector4&  getDistorsion() const;\n  \nprivate:\n  Matrix33 m_intrinsic;\n  Vector4  m_distorsion;\n};\n\nCameraCalibration::CameraCalibration()\n{\n  \n}\n\nCameraCalibration::CameraCalibration(float fx, float fy, float cx, float cy)\n{\n  for (int i=0; i<3; i++)\n    for (int j=0; j<3; j++)\n      m_intrinsic.mat[i][j] = 0;\n  \n  m_intrinsic.mat[0][0] = fx;\n  m_intrinsic.mat[1][1] = fy;\n  m_intrinsic.mat[0][2] = cx;\n  m_intrinsic.mat[1][2] = cy;\n  \n  for (int i=0; i<4; i++)\n    m_distorsion.data[i] = 0;\n}\n\n\nCameraCalibration::CameraCalibration(float fx, float fy, float cx, float cy, float distorsionCoeff[4])\n{\n  for (int i=0; i<3; i++)\n    for (int j=0; j<3; j++)\n      m_intrinsic.mat[i][j] = 0;\n  \n  m_intrinsic.mat[0][0] = fx;\n  m_intrinsic.mat[1][1] = fy;\n  m_intrinsic.mat[0][2] = cx;\n  m_intrinsic.mat[1][2] = cy;\n  \n  for (int i=0; i<4; i++)\n    m_distorsion.data[i] = distorsionCoeff[i];\n}\n\nvoid CameraCalibration::getMatrix34(float cparam[3][4]) const\n{\n  for (int j=0; j<3; j++)\n    for (int i=0; i<3; i++)\n      cparam[i][j] = m_intrinsic.mat[i][j];\n  \n  for (int i=0; i<4; i++)\n    cparam[3][i] = m_distorsion.data[i];\n}\n\nconst Matrix33& CameraCalibration::getIntrinsic() const\n{\n  return m_intrinsic;\n}\n\nconst Vector4&  CameraCalibration::getDistorsion() const\n{\n  return m_distorsion;\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/packtpub/DebugHelpers.hpp",
    "content": "#ifndef DEBUG_HELPERS_HPP\n#define DEBUG_HELPERS_HPP\n\n#include <string>\n#include <sstream>\n\ntemplate <typename T>\nstd::string ToString(const T& value)\n{\n    std::ostringstream stream;\n    stream << value;\n    return stream.str();\n}\n\nnamespace cv\n{\n    inline void showAndSave(std::string name, const cv::Mat& m)\n    {\n        cv::imshow(name, m);\n        cv::imwrite(name + \".png\", m);\n    }\n}\n\n#endif"
  },
  {
    "path": "src/3rdparty/packtpub/GeometryTypes.hpp",
    "content": "/*****************************************************************************\n*   GeometryTypes.hpp\n*   Example_MarkerBasedAR\n******************************************************************************\n*   by Khvedchenia Ievgen, 5th Dec 2012\n*   http://computer-vision-talks.com\n******************************************************************************\n*   Ch2 of the book \"Mastering OpenCV with Practical Computer Vision Projects\"\n*   Copyright Packt Publishing 2012.\n*   http://www.packtpub.com/cool-projects-with-opencv/book\n*****************************************************************************/\n\n#ifndef Example_MarkerBasedAR_GeometryTypes_hpp\n#define Example_MarkerBasedAR_GeometryTypes_hpp\n\nstruct Matrix44\n{\n  union\n  {\n    float data[16];\n    float mat[4][4];\n  };\n  \n  Matrix44 getTransposed() const;\n  Matrix44 getInvertedRT() const;\n  static Matrix44 identity();\n};\n\nstruct Matrix33\n{\n  union\n  {\n    float data[9];\n    float mat[3][3];\n  };\n  \n  static Matrix33 identity();\n  Matrix33 getTransposed() const;\n};\n\nstruct Vector4\n{\n  float data[4];\n};\n\nstruct Vector3\n{\n  float data[3];\n  \n  static Vector3 zero();\n  Vector3 operator-() const;\n};\n\nstruct Transformation\n{\n  Transformation();\n  Transformation(const Matrix33& r, const Vector3& t);\n  \n  Matrix33& r();\n  Vector3&  t();\n  \n  const Matrix33& r() const;\n  const Vector3&  t() const;\n  \n  Matrix44 getMat44() const;\n  \n  Transformation getInverted() const;\nprivate:\n  Matrix33 m_rotation;\n  Vector3  m_translation;\n};\n\nMatrix44 Matrix44::getTransposed() const\n{\n  Matrix44 t;\n  \n  for (int i=0;i<4; i++)\n    for (int j=0;j<4;j++)\n      t.mat[i][j] = mat[j][i];\n    \n  return t;\n}\n\nMatrix44 Matrix44::identity()\n{\n  Matrix44 eye;\n  \n  for (int i=0;i<4; i++)\n    for (int j=0;j<4;j++)\n      eye.mat[i][j] = i == j ? 1 : 0;\n  \n  return eye;\n}\n\nMatrix44 Matrix44::getInvertedRT() const\n{\n  Matrix44 t = identity();\n  \n  for (int col=0;col<3; col++)\n  {\n    for (int row=0;row<3;row++)\n    { \n      // Transpose rotation component (inversion)\n      t.mat[row][col] = mat[col][row];\n    }\n    \n    // Inverse translation component\n    t.mat[3][col] = - mat[3][col];\n  }\n  return t;\n}\n\nMatrix33 Matrix33::identity()\n{\n  Matrix33 eye;\n  \n  for (int i=0;i<3; i++)\n    for (int j=0;j<3;j++)\n      eye.mat[i][j] = i == j ? 
1 : 0;\n  \n  return eye;\n}\n\nMatrix33 Matrix33::getTransposed() const\n{\n  Matrix33 t;\n  \n  for (int i=0;i<3; i++)\n    for (int j=0;j<3;j++)\n      t.mat[i][j] = mat[j][i];\n  \n  return t;\n}\n\nVector3 Vector3::zero()\n{\n  Vector3 v = { 0,0,0 };\n  return v;\n}\n\nVector3 Vector3::operator-() const\n{\n  Vector3 v = { -data[0],-data[1],-data[2] };\n  return v;\n}\n\nTransformation::Transformation()\n: m_rotation(Matrix33::identity())\n, m_translation(Vector3::zero())\n{\n  \n}\n\nTransformation::Transformation(const Matrix33& r, const Vector3& t)\n: m_rotation(r)\n, m_translation(t)\n{\n  \n}\n\nMatrix33& Transformation::r()\n{\n  return m_rotation;\n}\n\nVector3&  Transformation::t()\n{\n  return  m_translation;\n}\n\nconst Matrix33& Transformation::r() const\n{\n  return m_rotation;\n}\n\nconst Vector3&  Transformation::t() const\n{\n  return  m_translation;\n}\n\nMatrix44 Transformation::getMat44() const\n{\n  Matrix44 res = Matrix44::identity();\n  \n  for (int col=0;col<3;col++)\n  {\n    for (int row=0;row<3;row++)\n    {\n      // Copy rotation component\n      res.mat[row][col] = m_rotation.mat[row][col];\n    }\n    \n    // Copy translation component\n    res.mat[3][col] = m_translation.data[col];\n  }\n  \n  return res;\n}\n\nTransformation Transformation::getInverted() const\n{\n  return Transformation(m_rotation.getTransposed(), -m_translation); \n}\n\n#endif\n"
  },
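  {
    "path": "docs/examples/geometry_types_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): composing the helper\n// types from GeometryTypes.hpp. The translation values are arbitrary. Note\n// that the header defines its functions out of line, so include it from one\n// translation unit only.\n#include \"GeometryTypes.hpp\"\n\nint main()\n{\n    Matrix33 r = Matrix33::identity();       // no rotation\n    Vector3  t = { 1.0f, 2.0f, 3.0f };       // arbitrary translation\n\n    Transformation pose(r, t);\n    Matrix44 m = pose.getMat44();            // 4x4 pose matrix, translation in mat[3][*]\n    Transformation inv = pose.getInverted(); // transposed rotation, negated translation\n\n    return (m.mat[3][0] == 1.0f && inv.t().data[0] == -1.0f) ? 0 : 1;\n}\n"
  },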
  {
    "path": "src/3rdparty/packtpub/Marker.hpp",
    "content": "/*****************************************************************************\n*   Marker.hpp\n*   Example_MarkerBasedAR\n******************************************************************************\n*   by Khvedchenia Ievgen, 5th Dec 2012\n*   http://computer-vision-talks.com\n******************************************************************************\n*   Ch2 of the book \"Mastering OpenCV with Practical Computer Vision Projects\"\n*   Copyright Packt Publishing 2012.\n*   http://www.packtpub.com/cool-projects-with-opencv/book\n*****************************************************************************/\n\n#ifndef Example_MarkerBasedAR_Marker_hpp\n#define Example_MarkerBasedAR_Marker_hpp\n\n////////////////////////////////////////////////////////////////////\n// Standard includes:\n#include <vector>\n#include <iostream>\n#include <opencv2/opencv.hpp>\n\n////////////////////////////////////////////////////////////////////\n// File includes:\n#include \"GeometryTypes.hpp\"\n\n/**\n * This class represents a marker\n */\nclass Marker\n{  \npublic:\n  Marker();\n  \n  friend bool operator<(const Marker &M1,const Marker&M2);\n  friend std::ostream & operator<<(std::ostream &str,const Marker &M);\n\n  static cv::Mat rotate(cv::Mat  in);\n  static int hammDistMarker(cv::Mat bits);\n  static int mat2id(const cv::Mat &bits);\n  static int getMarkerId(cv::Mat &in,int &nRotations);\n  \npublic:\n  \n  // Id of  the marker\n  int id;\n  \n  // Marker transformation with regards to the camera\n  Transformation transformation;\n  \n  std::vector<cv::Point2f> points;\n\n  // Helper function to draw the marker contour over the image\n  void drawContour(cv::Mat& image, cv::Scalar color = CV_RGB(0,250,0)) const;\n};\n\n#include \"DebugHelpers.hpp\"\n\nMarker::Marker()\n: id(-1)\n{\n}\n\nbool operator<(const Marker &M1,const Marker&M2)\n{\n  return M1.id<M2.id;\n}\n\ncv::Mat Marker::rotate(cv::Mat in)\n{\n  cv::Mat out;\n  in.copyTo(out);\n  for (int i=0;i<in.rows;i++)\n  {\n    for (int j=0;j<in.cols;j++)\n    {\n      out.at<uchar>(i,j)=in.at<uchar>(in.cols-j-1,i);\n    }\n  }\n  return out;\n}\n\nint Marker::hammDistMarker(cv::Mat bits)\n{\n  int ids[4][5]=\n  {\n    {1,0,0,0,0},\n    {1,0,1,1,1},\n    {0,1,0,0,1},\n    {0,1,1,1,0}\n  };\n  \n  int dist=0;\n  \n  for (int y=0;y<5;y++)\n  {\n    int minSum=1e5; //hamming distance to each possible word\n    \n    for (int p=0;p<4;p++)\n    {\n      int sum=0;\n      //now, count\n      for (int x=0;x<5;x++)\n      {\n        sum += bits.at<uchar>(y,x) == ids[p][x] ? 
0 : 1;\n      }\n      \n      if (minSum>sum)\n        minSum=sum;\n    }\n    \n    //do the and\n    dist += minSum;\n  }\n  \n  return dist;\n}\n\nint Marker::mat2id(const cv::Mat &bits)\n{\n  int val=0;\n  for (int y=0;y<5;y++)\n  {\n    val<<=1;\n    if ( bits.at<uchar>(y,1)) val|=1;\n    val<<=1;\n    if ( bits.at<uchar>(y,3)) val|=1;\n  }\n  return val;\n}\n\nint Marker::getMarkerId(cv::Mat &markerImage,int &nRotations)\n{\n  assert(markerImage.rows == markerImage.cols);\n  assert(markerImage.type() == CV_8UC1);\n  \n  cv::Mat grey = markerImage;\n\n  // Threshold image\n  cv::threshold(grey, grey, 125, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);\n\n#ifdef SHOW_DEBUG_IMAGES\n  cv::showAndSave(\"Binary marker\", grey);\n#endif\n\n  //Markers  are divided in 7x7 regions, of which the inner 5x5 belongs to marker info\n  //the external border should be entirely black\n  \n  int cellSize = markerImage.rows / 7;\n  \n  for (int y=0;y<7;y++)\n  {\n    int inc=6;\n    \n    if (y==0 || y==6) inc=1; //for first and last row, check the whole border\n    \n    for (int x=0;x<7;x+=inc)\n    {\n      int cellX = x * cellSize;\n      int cellY = y * cellSize;\n      cv::Mat cell = grey(cv::Rect(cellX,cellY,cellSize,cellSize));\n      \n      int nZ = cv::countNonZero(cell);\n\n      if (nZ > (cellSize*cellSize) / 2)\n      {\n        return -1;//can not be a marker because the border element is not black!\n      }\n    }\n  }\n  \n  cv::Mat bitMatrix = cv::Mat::zeros(5,5,CV_8UC1);\n  \n  //get information(for each inner square, determine if it is  black or white)  \n  for (int y=0;y<5;y++)\n  {\n    for (int x=0;x<5;x++)\n    {\n      int cellX = (x+1)*cellSize;\n      int cellY = (y+1)*cellSize;\n      cv::Mat cell = grey(cv::Rect(cellX,cellY,cellSize,cellSize));\n      \n      int nZ = cv::countNonZero(cell);\n      if (nZ> (cellSize*cellSize) /2) \n        bitMatrix.at<uchar>(y,x) = 1;\n    }\n  }\n  \n  //check all possible rotations\n  cv::Mat rotations[4];\n  int distances[4];\n  \n  rotations[0] = bitMatrix;  \n  distances[0] = hammDistMarker(rotations[0]);\n  \n  std::pair<int,int> minDist(distances[0],0);\n  \n  for (int i=1; i<4; i++)\n  {\n    //get the hamming distance to the nearest possible word\n    rotations[i] = rotate(rotations[i-1]);\n    distances[i] = hammDistMarker(rotations[i]);\n    \n    if (distances[i] < minDist.first)\n    {\n      minDist.first  = distances[i];\n      minDist.second = i;\n    }\n  }\n  \n  nRotations = minDist.second;\n  if (minDist.first == 0)\n  {\n    return mat2id(rotations[minDist.second]);\n  }\n  \n  return -1;\n}\n\nvoid Marker::drawContour(cv::Mat& image, cv::Scalar color) const\n{\n    float thickness = 2;\n\n    cv::line(image, points[0], points[1], color, thickness, CV_AA);\n    cv::line(image, points[1], points[2], color, thickness, CV_AA);\n    cv::line(image, points[2], points[3], color, thickness, CV_AA);\n    cv::line(image, points[3], points[0], color, thickness, CV_AA);\n}\n\n#endif\n"
  },
  {
    "path": "src/3rdparty/packtpub/MarkerDetector.hpp",
    "content": "/*****************************************************************************\n*   MarkerDetector.hpp\n*   Example_MarkerBasedAR\n******************************************************************************\n*   by Khvedchenia Ievgen, 5th Dec 2012\n*   http://computer-vision-talks.com\n******************************************************************************\n*   Ch2 of the book \"Mastering OpenCV with Practical Computer Vision Projects\"\n*   Copyright Packt Publishing 2012.\n*   http://www.packtpub.com/cool-projects-with-opencv/book\n*****************************************************************************/\n\n#ifndef Example_MarkerBasedAR_MarkerDetector_hpp\n#define Example_MarkerBasedAR_MarkerDetector_hpp\n\n////////////////////////////////////////////////////////////////////\n// Standard includes:\n#include <vector>\n#include <opencv2/opencv.hpp>\n\n////////////////////////////////////////////////////////////////////\n// File includes:\n#include \"BGRAVideoFrame.h\"\n#include \"CameraCalibration.hpp\"\n\n////////////////////////////////////////////////////////////////////\n// Forward declaration:\nclass Marker;\n\n/**\n * A top-level class that encapsulate marker detector algorithm\n */\nclass MarkerDetector\n{\npublic:\n  typedef std::vector<cv::Point>    PointsVector;\n  typedef std::vector<PointsVector> ContoursVector;\n\n\n  /**\n   * Initialize a new instance of marker detector object\n   * @calibration[in] - Camera calibration (intrinsic and distortion components) necessary for pose estimation.\n   */\n  MarkerDetector(CameraCalibration calibration);\n  \n  //! Searches for markes and fills the list of transformation for found markers\n  void processFrame(const BGRAVideoFrame& frame);\n  \n  const std::vector<Transformation>& getTransformations() const;\n  \nprotected:\n\n  //! Main marker detection routine\n  bool findMarkers(const BGRAVideoFrame& frame, std::vector<Marker>& detectedMarkers);\n\n  //! Converts image to grayscale\n  void prepareImage(const cv::Mat& bgraMat, cv::Mat& grayscale) const;\n\n  //! Performs binary threshold\n  void performThreshold(const cv::Mat& grayscale, cv::Mat& thresholdImg) const;\n\n  //! Detects appropriate contours\n  void findContours(cv::Mat& thresholdImg, ContoursVector& contours, int minContourPointsAllowed) const;\n\n  //! Finds marker candidates among all contours\n  void findCandidates(const ContoursVector& contours, std::vector<Marker>& detectedMarkers);\n  \n  //! Tries to recognize markers by detecting marker code \n  void recognizeMarkers(const cv::Mat& grayscale, std::vector<Marker>& detectedMarkers);\n\n  //! 
Calculates marker poses in 3D\n  void estimatePosition(std::vector<Marker>& detectedMarkers);\n\nprivate:\n  float m_minContourLengthAllowed;\n  \n  cv::Size markerSize;\n  cv::Mat camMatrix;\n  cv::Mat distCoeff;\n  std::vector<Transformation> m_transformations;\n  \n  cv::Mat m_grayscaleImage;\n  cv::Mat m_thresholdImg;  \n  cv::Mat canonicalMarkerImage;\n\n  ContoursVector           m_contours;\n  std::vector<cv::Point3f> m_markerCorners3d;\n  std::vector<cv::Point2f> m_markerCorners2d;\n};\n\n\n////////////////////////////////////////////////////////////////////\n// Standard includes:\n#include <iostream>\n#include <sstream>\n\n////////////////////////////////////////////////////////////////////\n// File includes:\n#include \"MarkerDetector.hpp\"\n#include \"Marker.hpp\"\n#include \"TinyLA.hpp\"\n#include \"DebugHelpers.hpp\"\n\nMarkerDetector::MarkerDetector(CameraCalibration calibration)\n    : m_minContourLengthAllowed(100)\n    , markerSize(100,100)\n{\n    cv::Mat(3,3, CV_32F, const_cast<float*>(&calibration.getIntrinsic().data[0])).copyTo(camMatrix);\n    cv::Mat(4,1, CV_32F, const_cast<float*>(&calibration.getDistorsion().data[0])).copyTo(distCoeff);\n\n    bool centerOrigin = true;\n    if (centerOrigin)\n    {\n        m_markerCorners3d.push_back(cv::Point3f(-0.5f,-0.5f,0));\n        m_markerCorners3d.push_back(cv::Point3f(+0.5f,-0.5f,0));\n        m_markerCorners3d.push_back(cv::Point3f(+0.5f,+0.5f,0));\n        m_markerCorners3d.push_back(cv::Point3f(-0.5f,+0.5f,0));\n    }\n    else\n    {\n        m_markerCorners3d.push_back(cv::Point3f(0,0,0));\n        m_markerCorners3d.push_back(cv::Point3f(1,0,0));\n        m_markerCorners3d.push_back(cv::Point3f(1,1,0));\n        m_markerCorners3d.push_back(cv::Point3f(0,1,0));    \n    }\n\n    m_markerCorners2d.push_back(cv::Point2f(0,0));\n    m_markerCorners2d.push_back(cv::Point2f(markerSize.width-1,0));\n    m_markerCorners2d.push_back(cv::Point2f(markerSize.width-1,markerSize.height-1));\n    m_markerCorners2d.push_back(cv::Point2f(0,markerSize.height-1));\n}\n\nvoid MarkerDetector::processFrame(const BGRAVideoFrame& frame)\n{\n    std::vector<Marker> markers;\n    findMarkers(frame, markers);\n\n    m_transformations.clear();\n    for (size_t i=0; i<markers.size(); i++)\n    {\n        m_transformations.push_back(markers[i].transformation);\n    }\n}\n\nconst std::vector<Transformation>& MarkerDetector::getTransformations() const\n{\n    return m_transformations;\n}\n\n\nbool MarkerDetector::findMarkers(const BGRAVideoFrame& frame, std::vector<Marker>& detectedMarkers)\n{\n    cv::Mat bgraMat(frame.height, frame.width, CV_8UC4, frame.data, frame.stride);\n\n    // Convert the image to grayscale\n    prepareImage(bgraMat, m_grayscaleImage);\n\n    // Make it binary\n    performThreshold(m_grayscaleImage, m_thresholdImg);\n\n    // Detect contours\n    findContours(m_thresholdImg, m_contours, m_grayscaleImage.cols / 5);\n\n    // Find closed contours that can be approximated with 4 points\n    findCandidates(m_contours, detectedMarkers);\n\n    // Find is them are markers\n    recognizeMarkers(m_grayscaleImage, detectedMarkers);\n\n    // Calculate their poses\n    estimatePosition(detectedMarkers);\n\n    //sort by id\n    std::sort(detectedMarkers.begin(), detectedMarkers.end());\n    return false;\n}\n\nvoid MarkerDetector::prepareImage(const cv::Mat& bgraMat, cv::Mat& grayscale) const\n{\n    // Convert to grayscale\n    cv::cvtColor(bgraMat, grayscale, CV_BGRA2GRAY);\n}\n\nvoid MarkerDetector::performThreshold(const 
cv::Mat& grayscale, cv::Mat& thresholdImg) const\n{\n    cv::threshold(grayscale, thresholdImg, 127, 255, cv::THRESH_BINARY_INV);\n\n    /*\n    cv::adaptiveThreshold(grayscale,   // Input image\n    thresholdImg,// Result binary image\n    255,         // \n    cv::ADAPTIVE_THRESH_GAUSSIAN_C, //\n    cv::THRESH_BINARY_INV, //\n    7, //\n    7  //\n    );\n    */\n\n#ifdef SHOW_DEBUG_IMAGES\n    cv::showAndSave(\"Threshold image\", thresholdImg);\n#endif\n}\n\nvoid MarkerDetector::findContours(cv::Mat& thresholdImg, ContoursVector& contours, int minContourPointsAllowed) const\n{\n    ContoursVector allContours;\n    cv::findContours(thresholdImg, allContours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);\n\n    contours.clear();\n    for (size_t i=0; i<allContours.size(); i++)\n    {\n        int contourSize = allContours[i].size();\n        if (contourSize > minContourPointsAllowed)\n        {\n            contours.push_back(allContours[i]);\n        }\n    }\n\n#ifdef SHOW_DEBUG_IMAGES\n    {\n        cv::Mat contoursImage(thresholdImg.size(), CV_8UC1);\n        contoursImage = cv::Scalar(0);\n        cv::drawContours(contoursImage, contours, -1, cv::Scalar(255), 2, CV_AA);\n        cv::showAndSave(\"Contours\", contoursImage);\n    }\n#endif\n}\n\nvoid MarkerDetector::findCandidates\n(\n    const ContoursVector& contours, \n    std::vector<Marker>& detectedMarkers\n) \n{\n    std::vector<cv::Point>  approxCurve;\n    std::vector<Marker>     possibleMarkers;\n\n    // For each contour, analyze if it is a parallelepiped likely to be the marker\n    for (size_t i=0; i<contours.size(); i++)\n    {\n        // Approximate to a polygon\n        double eps = contours[i].size() * 0.05;\n        cv::approxPolyDP(contours[i], approxCurve, eps, true);\n\n        // We interested only in polygons that contains only four points\n        if (approxCurve.size() != 4)\n            continue;\n\n        // And they have to be convex\n        if (!cv::isContourConvex(approxCurve))\n            continue;\n\n        // Ensure that the distance between consecutive points is large enough\n        float minDist = std::numeric_limits<float>::max();\n\n        for (int i = 0; i < 4; i++)\n        {\n            cv::Point side = approxCurve[i] - approxCurve[(i+1)%4];            \n            float squaredSideLength = side.dot(side);\n            minDist = std::min(minDist, squaredSideLength);\n        }\n\n        // Check that distance is not very small\n        if (minDist < m_minContourLengthAllowed)\n            continue;\n\n        // All tests are passed. Save marker candidate:\n        Marker m;\n\n        for (int i = 0; i<4; i++)\n            m.points.push_back( cv::Point2f(approxCurve[i].x,approxCurve[i].y) );\n\n        // Sort the points in anti-clockwise order\n        // Trace a line between the first and second point.\n        // If the third point is at the right side, then the points are anti-clockwise\n        cv::Point v1 = m.points[1] - m.points[0];\n        cv::Point v2 = m.points[2] - m.points[0];\n\n        double o = (v1.x * v2.y) - (v1.y * v2.x);\n\n        if (o < 0.0)\t\t //if the third point is in the left side, then sort in anti-clockwise order\n            std::swap(m.points[1], m.points[3]);\n\n        possibleMarkers.push_back(m);\n    }\n\n\n    // Remove these elements which corners are too close to each other.  
\n    // First detect candidates for removal:\n    std::vector< std::pair<int,int> > tooNearCandidates;\n    for (size_t i=0;i<possibleMarkers.size();i++)\n    { \n        const Marker& m1 = possibleMarkers[i];\n\n        //calculate the average distance of each corner to the nearest corner of the other marker candidate\n        for (size_t j=i+1;j<possibleMarkers.size();j++)\n        {\n            const Marker& m2 = possibleMarkers[j];\n\n            float distSquared = 0;\n\n            for (int c = 0; c < 4; c++)\n            {\n                cv::Point v = m1.points[c] - m2.points[c];\n                distSquared += v.dot(v);\n            }\n\n            distSquared /= 4;\n\n            if (distSquared < 100)\n            {\n                tooNearCandidates.push_back(std::pair<int,int>(i,j));\n            }\n        }\t\t\t\t\n    }\n\n    // Mark for removal the element of the pair with smaller perimeter\n    std::vector<bool> removalMask (possibleMarkers.size(), false);\n\n    for (size_t i=0; i<tooNearCandidates.size(); i++)\n    {\n        float p1 = perimeter(possibleMarkers[tooNearCandidates[i].first ].points);\n        float p2 = perimeter(possibleMarkers[tooNearCandidates[i].second].points);\n\n        size_t removalIndex;\n        if (p1 > p2)\n            removalIndex = tooNearCandidates[i].second;\n        else\n            removalIndex = tooNearCandidates[i].first;\n\n        removalMask[removalIndex] = true;\n    }\n\n    // Return candidates\n    detectedMarkers.clear();\n    for (size_t i=0;i<possibleMarkers.size();i++)\n    {\n        if (!removalMask[i])\n            detectedMarkers.push_back(possibleMarkers[i]);\n    }\n}\n\nvoid MarkerDetector::recognizeMarkers(const cv::Mat& grayscale, std::vector<Marker>& detectedMarkers)\n{\n    std::vector<Marker> goodMarkers;\n\n    // Identify the markers\n    for (size_t i=0;i<detectedMarkers.size();i++)\n    {\n        Marker& marker = detectedMarkers[i];\n\n        // Find the perspective transformation that brings current marker to rectangular form\n        cv::Mat markerTransform = cv::getPerspectiveTransform(marker.points, m_markerCorners2d);\n\n        // Transform image to get a canonical marker image\n        cv::warpPerspective(grayscale, canonicalMarkerImage,  markerTransform, markerSize);\n\n#ifdef SHOW_DEBUG_IMAGES\n        {\n            cv::Mat markerImage = grayscale.clone();\n            marker.drawContour(markerImage);\n            cv::Mat markerSubImage = markerImage(cv::boundingRect(marker.points));\n\n            cv::showAndSave(\"Source marker\" + ToString(i),           markerSubImage);\n            cv::showAndSave(\"Marker \" + ToString(i) + \" after warp\", canonicalMarkerImage);\n        }\n#endif\n\n        int nRotations;\n        int id = Marker::getMarkerId(canonicalMarkerImage, nRotations);\n        if (id !=- 1)\n        {\n            marker.id = id;\n            //sort the points so that they are always in the same order no matter the camera orientation\n            std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());\n\n            goodMarkers.push_back(marker);\n        }\n    }  \n\n    // Refine marker corners using sub pixel accuracy\n    if (goodMarkers.size() > 0)\n    {\n        std::vector<cv::Point2f> preciseCorners(4 * goodMarkers.size());\n\n        for (size_t i=0; i<goodMarkers.size(); i++)\n        {  \n            const Marker& marker = goodMarkers[i];      \n\n            for (int c = 0; c <4; c++)\n            {\n                
preciseCorners[i*4 + c] = marker.points[c];\n            }\n        }\n\n        cv::TermCriteria termCriteria = cv::TermCriteria(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 30, 0.01);\n        cv::cornerSubPix(grayscale, preciseCorners, cvSize(5,5), cvSize(-1,-1), termCriteria);\n\n        // Copy refined corners position back to markers\n        for (size_t i=0; i<goodMarkers.size(); i++)\n        {\n            Marker& marker = goodMarkers[i];      \n\n            for (int c=0;c<4;c++) \n            {\n                marker.points[c] = preciseCorners[i*4 + c];\n            }      \n        }\n    }\n\n#ifdef SHOW_DEBUG_IMAGES\n    {\n        cv::Mat markerCornersMat(grayscale.size(), grayscale.type());\n        markerCornersMat = cv::Scalar(0);\n\n        for (size_t i=0; i<goodMarkers.size(); i++)\n        {\n            goodMarkers[i].drawContour(markerCornersMat, cv::Scalar(255));    \n        }\n\n        cv::showAndSave(\"Markers refined edges\", grayscale * 0.5 + markerCornersMat);\n    }\n#endif\n\n    detectedMarkers = goodMarkers;\n}\n\n\nvoid MarkerDetector::estimatePosition(std::vector<Marker>& detectedMarkers)\n{\n    for (size_t i=0; i<detectedMarkers.size(); i++)\n    {\t\t\t\t\t\n        Marker& m = detectedMarkers[i];\n\n        cv::Mat Rvec;\n        cv::Mat_<float> Tvec;\n        cv::Mat raux,taux;\n        cv::solvePnP(m_markerCorners3d, m.points, camMatrix, distCoeff,raux,taux);\n        raux.convertTo(Rvec,CV_32F);\n        taux.convertTo(Tvec ,CV_32F);\n\n        cv::Mat_<float> rotMat(3,3); \n        cv::Rodrigues(Rvec, rotMat);\n\n        // Copy to transformation matrix\n        for (int col=0; col<3; col++)\n        {\n            for (int row=0; row<3; row++)\n            {        \n                m.transformation.r().mat[row][col] = rotMat(row,col); // Copy rotation component\n            }\n            m.transformation.t().data[col] = Tvec(col); // Copy translation component\n        }\n\n        // Since solvePnP finds camera location, w.r.t to marker pose, to get marker pose w.r.t to the camera we invert it.\n        m.transformation = m.transformation.getInverted();\n    }\n}\n\n#endif\n"
  },
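  {
    "path": "docs/examples/marker_detector_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): driving the\n// MarkerDetector declared above with frames grabbed by OpenCV. The intrinsics\n// are placeholders - substitute values from your own camera calibration.\n#include <iostream>\n#include <opencv2/opencv.hpp>\n#include \"MarkerDetector.hpp\"\n\nint main()\n{\n    CameraCalibration calib(726.0f, 726.0f, 320.0f, 240.0f); // fx, fy, cx, cy\n    MarkerDetector detector(calib);\n\n    cv::VideoCapture cap(0);\n    cv::Mat bgr, bgra;\n    while (cap.read(bgr))\n    {\n        cv::cvtColor(bgr, bgra, CV_BGR2BGRA); // the detector consumes BGRA buffers\n\n        BGRAVideoFrame frame;\n        frame.width  = bgra.cols;\n        frame.height = bgra.rows;\n        frame.stride = bgra.step;\n        frame.data   = bgra.data;\n\n        detector.processFrame(frame);\n        // One pose (marker w.r.t. the camera) per marker found in this frame:\n        std::cout << detector.getTransformations().size() << \" marker(s)\" << std::endl;\n    }\n    return 0;\n}\n"
  },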
  {
    "path": "src/3rdparty/packtpub/TinyLA.hpp",
    "content": "/*****************************************************************************\n*   TinyLA.hpp\n*   Example_MarkerBasedAR\n******************************************************************************\n*   by Khvedchenia Ievgen, 5th Dec 2012\n*   http://computer-vision-talks.com\n******************************************************************************\n*   Ch2 of the book \"Mastering OpenCV with Practical Computer Vision Projects\"\n*   Copyright Packt Publishing 2012.\n*   http://www.packtpub.com/cool-projects-with-opencv/book\n*****************************************************************************/\n\n#ifndef Example_MarkerBasedAR_TinyLA_hpp\n#define Example_MarkerBasedAR_TinyLA_hpp\n\n#include <vector>\n#include <opencv2/opencv.hpp>\n\nfloat perimeter(const std::vector<cv::Point2f> &a)\n{\n  float sum=0, dx, dy;\n  \n  for (size_t i=0;i<a.size();i++)\n  {\n    size_t i2=(i+1) % a.size();\n    \n    dx = a[i].x - a[i2].x;\n    dy = a[i].y - a[i2].y;\n    \n    sum += sqrt(dx*dx + dy*dy);\n  }\n  \n  return sum;\n}\n\n\nbool isInto(cv::Mat &contour, std::vector<cv::Point2f> &b)\n{\n  for (size_t i=0;i<b.size();i++)\n  {\n    if (cv::pointPolygonTest( contour,b[i],false)>0) return true;\n  }\n  return false;\n}\n\n#endif\n"
  },
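  {
    "path": "docs/examples/tinyla_usage.cpp",
    "content": "// Editor's sketch (not part of the original sources): a quick sanity check\n// for perimeter() from TinyLA.hpp.\n#include <cstdio>\n#include \"TinyLA.hpp\"\n\nint main()\n{\n    std::vector<cv::Point2f> square;\n    square.push_back(cv::Point2f(0, 0));\n    square.push_back(cv::Point2f(1, 0));\n    square.push_back(cv::Point2f(1, 1));\n    square.push_back(cv::Point2f(0, 1));\n    std::printf(\"perimeter = %f\\n\", perimeter(square)); // 4.0 for the unit square\n    return 0;\n}\n"
  },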
  {
    "path": "src/3rdparty/pthread/include/pthread.h",
    "content": "/* This is an implementation of the threads API of POSIX 1003.1-2001.\n *\n * --------------------------------------------------------------------------\n *\n *      Pthreads-win32 - POSIX Threads Library for Win32\n *      Copyright(C) 1998 John E. Bossom\n *      Copyright(C) 1999,2005 Pthreads-win32 contributors\n * \n *      Contact Email: rpj@callisto.canberra.edu.au\n * \n *      The current list of contributors is contained\n *      in the file CONTRIBUTORS included with the source\n *      code distribution. The list can also be seen at the\n *      following World Wide Web location:\n *      http://sources.redhat.com/pthreads-win32/contributors.html\n * \n *      This library is free software; you can redistribute it and/or\n *      modify it under the terms of the GNU Lesser General Public\n *      License as published by the Free Software Foundation; either\n *      version 2 of the License, or (at your option) any later version.\n * \n *      This library is distributed in the hope that it will be useful,\n *      but WITHOUT ANY WARRANTY; without even the implied warranty of\n *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n *      Lesser General Public License for more details.\n * \n *      You should have received a copy of the GNU Lesser General Public\n *      License along with this library in the file COPYING.LIB;\n *      if not, write to the Free Software Foundation, Inc.,\n *      59 Temple Place - Suite 330, Boston, MA 02111-1307, USA\n */\n\n#if !defined( PTHREAD_H )\n#define PTHREAD_H\n\n/*\n * See the README file for an explanation of the pthreads-win32 version\n * numbering scheme and how the DLL is named etc.\n */\n#define PTW32_VERSION 2,9,1,0\n#define PTW32_VERSION_STRING \"2, 9, 1, 0\\0\"\n\n/* There are three implementations of cancel cleanup.\n * Note that pthread.h is included in both application\n * compilation units and also internally for the library.\n * The code here and within the library aims to work\n * for all reasonable combinations of environments.\n *\n * The three implementations are:\n *\n *   WIN32 SEH\n *   C\n *   C++\n *\n * Please note that exiting a push/pop block via\n * \"return\", \"exit\", \"break\", or \"continue\" will\n * lead to different behaviour amongst applications\n * depending upon whether the library was built\n * using SEH, C++, or C. For example, a library built\n * with SEH will call the cleanup routine, while both\n * C++ and C built versions will not.\n */\n\n/*\n * Define defaults for cleanup code.\n * Note: Unless the build explicitly defines one of the following, then\n * we default to standard C style cleanup. This style uses setjmp/longjmp\n * in the cancelation and thread exit implementations and therefore won't\n * do stack unwinding if linked to applications that have it (e.g.\n * C++ apps). 
This is currently consistent with most/all commercial Unix\n * POSIX threads implementations.\n */\n#if !defined( __CLEANUP_SEH ) && !defined( __CLEANUP_CXX ) && !defined( __CLEANUP_C )\n# define __CLEANUP_C\n#endif\n\n#if defined( __CLEANUP_SEH ) && ( !defined( _MSC_VER ) && !defined(PTW32_RC_MSC))\n#error ERROR [__FILE__, line __LINE__]: SEH is not supported for this compiler.\n#endif\n\n/*\n * Stop here if we are being included by the resource compiler.\n */\n#if !defined(RC_INVOKED)\n\n#undef PTW32_LEVEL\n\n#if defined(_POSIX_SOURCE)\n#define PTW32_LEVEL 0\n/* Early POSIX */\n#endif\n\n#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309\n#undef PTW32_LEVEL\n#define PTW32_LEVEL 1\n/* Include 1b, 1c and 1d */\n#endif\n\n#if defined(INCLUDE_NP)\n#undef PTW32_LEVEL\n#define PTW32_LEVEL 2\n/* Include Non-Portable extensions */\n#endif\n\n#define PTW32_LEVEL_MAX 3\n\n#if ( defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200112 )  || !defined(PTW32_LEVEL)\n#define PTW32_LEVEL PTW32_LEVEL_MAX\n/* Include everything */\n#endif\n\n#if defined(_UWIN)\n#   define HAVE_STRUCT_TIMESPEC 1\n#   define HAVE_SIGNAL_H        1\n#   undef HAVE_PTW32_CONFIG_H\n#   pragma comment(lib, \"pthread\")\n#endif\n\n/*\n * -------------------------------------------------------------\n *\n *\n * Module: pthread.h\n *\n * Purpose:\n *      Provides an implementation of PThreads based upon the\n *      standard:\n *\n *              POSIX 1003.1-2001\n *  and\n *    The Single Unix Specification version 3\n *\n *    (these two are equivalent)\n *\n *      in order to enhance code portability between Windows,\n *  various commercial Unix implementations, and Linux.\n *\n *      See the ANNOUNCE file for a full list of conforming\n *      routines and defined constants, and a list of missing\n *      routines and constants not defined in this implementation.\n *\n * Authors:\n *      There have been many contributors to this library.\n *      The initial implementation was contributed by\n *      John Bossom, and several others have provided major\n *      sections or revisions of parts of the implementation.\n *      Often significant effort has been contributed to\n *      find and fix important bugs and other problems to\n *      improve the reliability of the library, which sometimes\n *      is not reflected in the amount of code which changed as\n *      result.\n *      As much as possible, the contributors are acknowledged\n *      in the ChangeLog file in the source code distribution\n *      where their changes are noted in detail.\n *\n *      Contributors are listed in the CONTRIBUTORS file.\n *\n *      As usual, all bouquets go to the contributors, and all\n *      brickbats go to the project maintainer.\n *\n * Maintainer:\n *      The code base for this project is coordinated and\n *      eventually pre-tested, packaged, and made available by\n *\n *              Ross Johnson <rpj@callisto.canberra.edu.au>\n *\n * QA Testers:\n *      Ultimately, the library is tested in the real world by\n *      a host of competent and demanding scientists and\n *      engineers who report bugs and/or provide solutions\n *      which are then fixed or incorporated into subsequent\n *      versions of the library. Each time a bug is fixed, a\n *      test case is written to prove the fix and ensure\n *      that later changes to the code don't reintroduce the\n *      same error. 
The number of test cases is slowly growing\n *      and therefore so is the code reliability.\n *\n * Compliance:\n *      See the file ANNOUNCE for the list of implemented\n *      and not-implemented routines and defined options.\n *      Of course, these are all defined is this file as well.\n *\n * Web site:\n *      The source code and other information about this library\n *      are available from\n *\n *              http://sources.redhat.com/pthreads-win32/\n *\n * -------------------------------------------------------------\n */\n\n/* Try to avoid including windows.h */\n#if (defined(__MINGW64__) || defined(__MINGW32__)) && defined(__cplusplus)\n#define PTW32_INCLUDE_WINDOWS_H\n#endif\n\n#if defined(PTW32_INCLUDE_WINDOWS_H)\n#include <windows.h>\n#endif\n\n#if defined(_MSC_VER) && _MSC_VER < 1300 || defined(__DMC__)\n/*\n * VC++6.0 or early compiler's header has no DWORD_PTR type.\n */\ntypedef unsigned long DWORD_PTR;\ntypedef unsigned long ULONG_PTR;\n#endif\n/*\n * -----------------\n * autoconf switches\n * -----------------\n */\n\n#if defined(HAVE_PTW32_CONFIG_H)\n#include \"config.h\"\n#endif /* HAVE_PTW32_CONFIG_H */\n\n#if !defined(NEED_FTIME)\n#include <time.h>\n#else /* NEED_FTIME */\n/* use native WIN32 time API */\n#endif /* NEED_FTIME */\n\n#if defined(HAVE_SIGNAL_H)\n#include <signal.h>\n#endif /* HAVE_SIGNAL_H */\n\n#include <limits.h>\n\n/*\n * Boolean values to make us independent of system includes.\n */\nenum {\n  PTW32_FALSE = 0,\n  PTW32_TRUE = (! PTW32_FALSE)\n};\n\n/*\n * This is a duplicate of what is in the autoconf config.h,\n * which is only used when building the pthread-win32 libraries.\n */\n\n#if !defined(PTW32_CONFIG_H)\n#  if defined(WINCE)\n#    define NEED_ERRNO\n#    define NEED_SEM\n#  endif\n#  if defined(__MINGW64__)\n#    define HAVE_STRUCT_TIMESPEC\n#    define HAVE_MODE_T\n#  elif defined(_UWIN) || defined(__MINGW32__)\n#    define HAVE_MODE_T\n#  endif\n#endif\n\n/*\n *\n */\n\n#if PTW32_LEVEL >= PTW32_LEVEL_MAX\n#if defined(NEED_ERRNO)\n#include \"need_errno.h\"\n#else\n#include <errno.h>\n#endif\n#endif /* PTW32_LEVEL >= PTW32_LEVEL_MAX */\n\n/*\n * Several systems don't define some error numbers.\n */\n#if !defined(ENOTSUP)\n#  define ENOTSUP 48   /* This is the value in Solaris. */\n#endif\n\n#if !defined(ETIMEDOUT)\n#  define ETIMEDOUT 10060 /* Same as WSAETIMEDOUT */\n#endif\n\n#if !defined(ENOSYS)\n#  define ENOSYS 140     /* Semi-arbitrary value */\n#endif\n\n#if !defined(EDEADLK)\n#  if defined(EDEADLOCK)\n#    define EDEADLK EDEADLOCK\n#  else\n#    define EDEADLK 36     /* This is the value in MSVC. 
*/\n#  endif\n#endif\n\n/* POSIX 2008 - related to robust mutexes */\n#if !defined(EOWNERDEAD)\n#  define EOWNERDEAD 43\n#endif\n#if !defined(ENOTRECOVERABLE)\n#  define ENOTRECOVERABLE 44\n#endif\n\n#include <sched.h>\n\n/*\n * To avoid including windows.h we define only those things that we\n * actually need from it.\n */\n#if !defined(PTW32_INCLUDE_WINDOWS_H)\n#if !defined(HANDLE)\n# define PTW32__HANDLE_DEF\n# define HANDLE void *\n#endif\n#if !defined(DWORD)\n# define PTW32__DWORD_DEF\n# define DWORD unsigned long\n#endif\n#endif\n\n#if !defined(HAVE_STRUCT_TIMESPEC)\n#define HAVE_STRUCT_TIMESPEC\n#if !defined(_TIMESPEC_DEFINED)\n#define _TIMESPEC_DEFINED\nstruct timespec {\n        time_t tv_sec;\n        long tv_nsec;\n};\n#endif /* _TIMESPEC_DEFINED */\n#endif /* HAVE_STRUCT_TIMESPEC */\n\n#if !defined(SIG_BLOCK)\n#define SIG_BLOCK 0\n#endif /* SIG_BLOCK */\n\n#if !defined(SIG_UNBLOCK)\n#define SIG_UNBLOCK 1\n#endif /* SIG_UNBLOCK */\n\n#if !defined(SIG_SETMASK)\n#define SIG_SETMASK 2\n#endif /* SIG_SETMASK */\n\n#if defined(__cplusplus)\nextern \"C\"\n{\n#endif                          /* __cplusplus */\n\n/*\n * -------------------------------------------------------------\n *\n * POSIX 1003.1-2001 Options\n * =========================\n *\n * Options are normally set in <unistd.h>, which is not provided\n * with pthreads-win32.\n *\n * For conformance with the Single Unix Specification (version 3), all of the\n * options below are defined, and have a value of either -1 (not supported)\n * or 200112L (supported).\n *\n * These options can neither be left undefined nor have a value of 0, because\n * either indicates that sysconf(), which is not implemented, may be used at\n * runtime to check the status of the option.\n *\n * _POSIX_THREADS (== 200112L)\n *                      If == 200112L, you can use threads\n *\n * _POSIX_THREAD_ATTR_STACKSIZE (== 200112L)\n *                      If == 200112L, you can control the size of a thread's\n *                      stack\n *                              pthread_attr_getstacksize\n *                              pthread_attr_setstacksize\n *\n * _POSIX_THREAD_ATTR_STACKADDR (== -1)\n *                      If == 200112L, you can allocate and control a thread's\n *                      stack. If not supported, the following functions\n *                      will return ENOSYS, indicating they are not\n *                      supported:\n *                              pthread_attr_getstackaddr\n *                              pthread_attr_setstackaddr\n *\n * _POSIX_THREAD_PRIORITY_SCHEDULING (== -1)\n *                      If == 200112L, you can use realtime scheduling.\n *                      This option indicates that the behaviour of some\n *                      implemented functions conforms to the additional TPS\n *                      requirements in the standard. E.g. 
rwlocks favour\n *                      writers over readers when threads have equal priority.\n *\n * _POSIX_THREAD_PRIO_INHERIT (== -1)\n *                      If == 200112L, you can create priority inheritance\n *                      mutexes.\n *                              pthread_mutexattr_getprotocol +\n *                              pthread_mutexattr_setprotocol +\n *\n * _POSIX_THREAD_PRIO_PROTECT (== -1)\n *                      If == 200112L, you can create priority ceiling mutexes\n *                      Indicates the availability of:\n *                              pthread_mutex_getprioceiling\n *                              pthread_mutex_setprioceiling\n *                              pthread_mutexattr_getprioceiling\n *                              pthread_mutexattr_getprotocol     +\n *                              pthread_mutexattr_setprioceiling\n *                              pthread_mutexattr_setprotocol     +\n *\n * _POSIX_THREAD_PROCESS_SHARED (== -1)\n *                      If set, you can create mutexes and condition\n *                      variables that can be shared with another\n *                      process.If set, indicates the availability\n *                      of:\n *                              pthread_mutexattr_getpshared\n *                              pthread_mutexattr_setpshared\n *                              pthread_condattr_getpshared\n *                              pthread_condattr_setpshared\n *\n * _POSIX_THREAD_SAFE_FUNCTIONS (== 200112L)\n *                      If == 200112L you can use the special *_r library\n *                      functions that provide thread-safe behaviour\n *\n * _POSIX_READER_WRITER_LOCKS (== 200112L)\n *                      If == 200112L, you can use read/write locks\n *\n * _POSIX_SPIN_LOCKS (== 200112L)\n *                      If == 200112L, you can use spin locks\n *\n * _POSIX_BARRIERS (== 200112L)\n *                      If == 200112L, you can use barriers\n *\n *      + These functions provide both 'inherit' and/or\n *        'protect' protocol, based upon these macro\n *        settings.\n *\n * -------------------------------------------------------------\n */\n\n/*\n * POSIX Options\n */\n#undef _POSIX_THREADS\n#define _POSIX_THREADS 200809L\n\n#undef _POSIX_READER_WRITER_LOCKS\n#define _POSIX_READER_WRITER_LOCKS 200809L\n\n#undef _POSIX_SPIN_LOCKS\n#define _POSIX_SPIN_LOCKS 200809L\n\n#undef _POSIX_BARRIERS\n#define _POSIX_BARRIERS 200809L\n\n#undef _POSIX_THREAD_SAFE_FUNCTIONS\n#define _POSIX_THREAD_SAFE_FUNCTIONS 200809L\n\n#undef _POSIX_THREAD_ATTR_STACKSIZE\n#define _POSIX_THREAD_ATTR_STACKSIZE 200809L\n\n/*\n * The following options are not supported\n */\n#undef _POSIX_THREAD_ATTR_STACKADDR\n#define _POSIX_THREAD_ATTR_STACKADDR -1\n\n#undef _POSIX_THREAD_PRIO_INHERIT\n#define _POSIX_THREAD_PRIO_INHERIT -1\n\n#undef _POSIX_THREAD_PRIO_PROTECT\n#define _POSIX_THREAD_PRIO_PROTECT -1\n\n/* TPS is not fully supported.  
*/\n#undef _POSIX_THREAD_PRIORITY_SCHEDULING\n#define _POSIX_THREAD_PRIORITY_SCHEDULING -1\n\n#undef _POSIX_THREAD_PROCESS_SHARED\n#define _POSIX_THREAD_PROCESS_SHARED -1\n\n\n/*\n * POSIX 1003.1-2001 Limits\n * ===========================\n *\n * These limits are normally set in <limits.h>, which is not provided with\n * pthreads-win32.\n *\n * PTHREAD_DESTRUCTOR_ITERATIONS\n *                      Maximum number of attempts to destroy\n *                      a thread's thread-specific data on\n *                      termination (must be at least 4)\n *\n * PTHREAD_KEYS_MAX\n *                      Maximum number of thread-specific data keys\n *                      available per process (must be at least 128)\n *\n * PTHREAD_STACK_MIN\n *                      Minimum supported stack size for a thread\n *\n * PTHREAD_THREADS_MAX\n *                      Maximum number of threads supported per\n *                      process (must be at least 64).\n *\n * SEM_NSEMS_MAX\n *                      The maximum number of semaphores a process can have.\n *                      (must be at least 256)\n *\n * SEM_VALUE_MAX\n *                      The maximum value a semaphore can have.\n *                      (must be at least 32767)\n *\n */\n#undef _POSIX_THREAD_DESTRUCTOR_ITERATIONS\n#define _POSIX_THREAD_DESTRUCTOR_ITERATIONS     4\n\n#undef PTHREAD_DESTRUCTOR_ITERATIONS\n#define PTHREAD_DESTRUCTOR_ITERATIONS           _POSIX_THREAD_DESTRUCTOR_ITERATIONS\n\n#undef _POSIX_THREAD_KEYS_MAX\n#define _POSIX_THREAD_KEYS_MAX                  128\n\n#undef PTHREAD_KEYS_MAX\n#define PTHREAD_KEYS_MAX                        _POSIX_THREAD_KEYS_MAX\n\n#undef PTHREAD_STACK_MIN\n#define PTHREAD_STACK_MIN                       0\n\n#undef _POSIX_THREAD_THREADS_MAX\n#define _POSIX_THREAD_THREADS_MAX               64\n\n  /* Arbitrary value */\n#undef PTHREAD_THREADS_MAX\n#define PTHREAD_THREADS_MAX                     2019\n\n#undef _POSIX_SEM_NSEMS_MAX\n#define _POSIX_SEM_NSEMS_MAX                    256\n\n  /* Arbitrary value */\n#undef SEM_NSEMS_MAX\n#define SEM_NSEMS_MAX                           1024\n\n#undef _POSIX_SEM_VALUE_MAX\n#define _POSIX_SEM_VALUE_MAX                    32767\n\n#undef SEM_VALUE_MAX\n#define SEM_VALUE_MAX                           INT_MAX\n\n\n#if defined(__GNUC__) && !defined(__declspec)\n# error Please upgrade your GNU compiler to one that supports __declspec.\n#endif\n\n/*\n * When building the library, you should define PTW32_BUILD so that\n * the variables/functions are exported correctly. When using the library,\n * do NOT define PTW32_BUILD, and then the variables/functions will\n * be imported correctly.\n */\n#if !defined(PTW32_STATIC_LIB)\n#  if defined(PTW32_BUILD)\n#    define PTW32_DLLPORT __declspec (dllexport)\n#  else\n#    define PTW32_DLLPORT __declspec (dllimport)\n#  endif\n#else\n#  define PTW32_DLLPORT\n#endif\n\n/*\n * The Open Watcom C/C++ compiler uses a non-standard calling convention\n * that passes function args in registers unless __cdecl is explicitly specified\n * in exposed function prototypes.\n *\n * We force all calls to cdecl even though this could slow Watcom code down\n * slightly. 
If you know that the Watcom compiler will be used to build both\n * the DLL and application, then you can probably define this as a null string.\n * Remember that pthread.h (this file) is used for both the DLL and application builds.\n */\n#define PTW32_CDECL __cdecl\n\n#if defined(_UWIN) && PTW32_LEVEL >= PTW32_LEVEL_MAX\n#   include     <sys/types.h>\n#else\n/*\n * Generic handle type - intended to extend uniqueness beyond\n * that available with a simple pointer. It should scale for either\n * IA-32 or IA-64.\n */\ntypedef struct {\n    void * p;                   /* Pointer to actual object */\n    unsigned int x;             /* Extra information - reuse count etc */\n} ptw32_handle_t;\n\ntypedef ptw32_handle_t pthread_t;\ntypedef struct pthread_attr_t_ * pthread_attr_t;\ntypedef struct pthread_once_t_ pthread_once_t;\ntypedef struct pthread_key_t_ * pthread_key_t;\ntypedef struct pthread_mutex_t_ * pthread_mutex_t;\ntypedef struct pthread_mutexattr_t_ * pthread_mutexattr_t;\ntypedef struct pthread_cond_t_ * pthread_cond_t;\ntypedef struct pthread_condattr_t_ * pthread_condattr_t;\n#endif\ntypedef struct pthread_rwlock_t_ * pthread_rwlock_t;\ntypedef struct pthread_rwlockattr_t_ * pthread_rwlockattr_t;\ntypedef struct pthread_spinlock_t_ * pthread_spinlock_t;\ntypedef struct pthread_barrier_t_ * pthread_barrier_t;\ntypedef struct pthread_barrierattr_t_ * pthread_barrierattr_t;\n\n/*\n * ====================\n * ====================\n * POSIX Threads\n * ====================\n * ====================\n */\n\nenum {\n/*\n * pthread_attr_{get,set}detachstate\n */\n  PTHREAD_CREATE_JOINABLE       = 0,  /* Default */\n  PTHREAD_CREATE_DETACHED       = 1,\n\n/*\n * pthread_attr_{get,set}inheritsched\n */\n  PTHREAD_INHERIT_SCHED         = 0,\n  PTHREAD_EXPLICIT_SCHED        = 1,  /* Default */\n\n/*\n * pthread_{get,set}scope\n */\n  PTHREAD_SCOPE_PROCESS         = 0,\n  PTHREAD_SCOPE_SYSTEM          = 1,  /* Default */\n\n/*\n * pthread_setcancelstate paramters\n */\n  PTHREAD_CANCEL_ENABLE         = 0,  /* Default */\n  PTHREAD_CANCEL_DISABLE        = 1,\n\n/*\n * pthread_setcanceltype parameters\n */\n  PTHREAD_CANCEL_ASYNCHRONOUS   = 0,\n  PTHREAD_CANCEL_DEFERRED       = 1,  /* Default */\n\n/*\n * pthread_mutexattr_{get,set}pshared\n * pthread_condattr_{get,set}pshared\n */\n  PTHREAD_PROCESS_PRIVATE       = 0,\n  PTHREAD_PROCESS_SHARED        = 1,\n\n/*\n * pthread_mutexattr_{get,set}robust\n */\n  PTHREAD_MUTEX_STALLED         = 0,  /* Default */\n  PTHREAD_MUTEX_ROBUST          = 1,\n\n/*\n * pthread_barrier_wait\n */\n  PTHREAD_BARRIER_SERIAL_THREAD = -1\n};\n\n/*\n * ====================\n * ====================\n * Cancelation\n * ====================\n * ====================\n */\n#define PTHREAD_CANCELED       ((void *)(size_t) -1)\n\n\n/*\n * ====================\n * ====================\n * Once Key\n * ====================\n * ====================\n */\n#define PTHREAD_ONCE_INIT       { PTW32_FALSE, 0, 0, 0}\n\nstruct pthread_once_t_\n{\n  int          done;        /* indicates if user function has been executed */\n  void *       lock;\n  int          reserved1;\n  int          reserved2;\n};\n\n\n/*\n * ====================\n * ====================\n * Object initialisers\n * ====================\n * ====================\n */\n#define PTHREAD_MUTEX_INITIALIZER ((pthread_mutex_t)(size_t) -1)\n#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER ((pthread_mutex_t)(size_t) -2)\n#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER ((pthread_mutex_t)(size_t) -3)\n\n/*\n * Compatibility with 
LinuxThreads\n */\n#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP PTHREAD_RECURSIVE_MUTEX_INITIALIZER\n#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP PTHREAD_ERRORCHECK_MUTEX_INITIALIZER\n\n#define PTHREAD_COND_INITIALIZER ((pthread_cond_t)(size_t) -1)\n\n#define PTHREAD_RWLOCK_INITIALIZER ((pthread_rwlock_t)(size_t) -1)\n\n#define PTHREAD_SPINLOCK_INITIALIZER ((pthread_spinlock_t)(size_t) -1)\n\n\n/*\n * Mutex types.\n */\nenum\n{\n  /* Compatibility with LinuxThreads */\n  PTHREAD_MUTEX_FAST_NP,\n  PTHREAD_MUTEX_RECURSIVE_NP,\n  PTHREAD_MUTEX_ERRORCHECK_NP,\n  PTHREAD_MUTEX_TIMED_NP = PTHREAD_MUTEX_FAST_NP,\n  PTHREAD_MUTEX_ADAPTIVE_NP = PTHREAD_MUTEX_FAST_NP,\n  /* For compatibility with POSIX */\n  PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_FAST_NP,\n  PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,\n  PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP,\n  PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL\n};\n\n\ntypedef struct ptw32_cleanup_t ptw32_cleanup_t;\n\n#if defined(_MSC_VER)\n/* Disable MSVC 'anachronism used' warning */\n#pragma warning( disable : 4229 )\n#endif\n\ntypedef void (* PTW32_CDECL ptw32_cleanup_callback_t)(void *);\n\n#if defined(_MSC_VER)\n#pragma warning( default : 4229 )\n#endif\n\nstruct ptw32_cleanup_t\n{\n  ptw32_cleanup_callback_t routine;\n  void *arg;\n  struct ptw32_cleanup_t *prev;\n};\n\n#if defined(__CLEANUP_SEH)\n        /*\n         * WIN32 SEH version of cancel cleanup.\n         */\n\n#define pthread_cleanup_push( _rout, _arg ) \\\n        { \\\n            ptw32_cleanup_t     _cleanup; \\\n            \\\n        _cleanup.routine        = (ptw32_cleanup_callback_t)(_rout); \\\n            _cleanup.arg        = (_arg); \\\n            __try \\\n              { \\\n\n#define pthread_cleanup_pop( _execute ) \\\n              } \\\n            __finally \\\n                { \\\n                    if( _execute || AbnormalTermination()) \\\n                      { \\\n                          (*(_cleanup.routine))( _cleanup.arg ); \\\n                      } \\\n                } \\\n        }\n\n#else /* __CLEANUP_SEH */\n\n#if defined(__CLEANUP_C)\n\n        /*\n         * C implementation of PThreads cancel cleanup\n         */\n\n#define pthread_cleanup_push( _rout, _arg ) \\\n        { \\\n            ptw32_cleanup_t     _cleanup; \\\n            \\\n            ptw32_push_cleanup( &_cleanup, (ptw32_cleanup_callback_t) (_rout), (_arg) ); \\\n\n#define pthread_cleanup_pop( _execute ) \\\n            (void) ptw32_pop_cleanup( _execute ); \\\n        }\n\n#else /* __CLEANUP_C */\n\n#if defined(__CLEANUP_CXX)\n\n        /*\n         * C++ version of cancel cleanup.\n         * - John E. Bossom.\n         */\n\n        class PThreadCleanup {\n          /*\n           * PThreadCleanup\n           *\n           * Purpose\n           *      This class is a C++ helper class that is\n           *      used to implement pthread_cleanup_push/\n           *      pthread_cleanup_pop.\n           *      The destructor of this class automatically\n           *      pops the pushed cleanup routine regardless\n           *      of how the code exits the scope\n           *      (i.e. 
such as by an exception)\n           */\n      ptw32_cleanup_callback_t cleanUpRout;\n          void    *       obj;\n          int             executeIt;\n\n        public:\n          PThreadCleanup() :\n            cleanUpRout( 0 ),\n            obj( 0 ),\n            executeIt( 0 )\n            /*\n             * No cleanup performed\n             */\n            {\n            }\n\n          PThreadCleanup(\n             ptw32_cleanup_callback_t routine,\n                         void    *       arg ) :\n            cleanUpRout( routine ),\n            obj( arg ),\n            executeIt( 1 )\n            /*\n             * Registers a cleanup routine for 'arg'\n             */\n            {\n            }\n\n          ~PThreadCleanup()\n            {\n              if ( executeIt && ((void *) cleanUpRout != (void *) 0) )\n                {\n                  (void) (*cleanUpRout)( obj );\n                }\n            }\n\n          void execute( int exec )\n            {\n              executeIt = exec;\n            }\n        };\n\n        /*\n         * C++ implementation of PThreads cancel cleanup;\n         * This implementation takes advantage of a helper\n         * class who's destructor automatically calls the\n         * cleanup routine if we exit our scope weirdly\n         */\n#define pthread_cleanup_push( _rout, _arg ) \\\n        { \\\n            PThreadCleanup  cleanup((ptw32_cleanup_callback_t)(_rout), \\\n                                    (void *) (_arg) );\n\n#define pthread_cleanup_pop( _execute ) \\\n            cleanup.execute( _execute ); \\\n        }\n\n#else\n\n#error ERROR [__FILE__, line __LINE__]: Cleanup type undefined.\n\n#endif /* __CLEANUP_CXX */\n\n#endif /* __CLEANUP_C */\n\n#endif /* __CLEANUP_SEH */\n\n/*\n * ===============\n * ===============\n * Methods\n * ===============\n * ===============\n */\n\n/*\n * PThread Attribute Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_init (pthread_attr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_destroy (pthread_attr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_getdetachstate (const pthread_attr_t * attr,\n                                         int *detachstate);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_getstackaddr (const pthread_attr_t * attr,\n                                       void **stackaddr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_getstacksize (const pthread_attr_t * attr,\n                                       size_t * stacksize);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_setdetachstate (pthread_attr_t * attr,\n                                         int detachstate);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_setstackaddr (pthread_attr_t * attr,\n                                       void *stackaddr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_setstacksize (pthread_attr_t * attr,\n                                       size_t stacksize);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_getschedparam (const pthread_attr_t *attr,\n                                        struct sched_param *param);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_setschedparam (pthread_attr_t *attr,\n                                        const struct sched_param *param);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_setschedpolicy (pthread_attr_t *,\n                                         int);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_getschedpolicy (const pthread_attr_t *,\n                                         int *);\n\nPTW32_DLLPORT int 
PTW32_CDECL pthread_attr_setinheritsched(pthread_attr_t * attr,\n                                         int inheritsched);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_getinheritsched(const pthread_attr_t * attr,\n                                         int * inheritsched);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_setscope (pthread_attr_t *,\n                                   int);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_attr_getscope (const pthread_attr_t *,\n                                   int *);\n\n/*\n * PThread Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_create (pthread_t * tid,\n                            const pthread_attr_t * attr,\n                            void *(PTW32_CDECL *start) (void *),\n                            void *arg);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_detach (pthread_t tid);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_equal (pthread_t t1,\n                           pthread_t t2);\n\nPTW32_DLLPORT void PTW32_CDECL pthread_exit (void *value_ptr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_join (pthread_t thread,\n                          void **value_ptr);\n\nPTW32_DLLPORT pthread_t PTW32_CDECL pthread_self (void);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_cancel (pthread_t thread);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_setcancelstate (int state,\n                                    int *oldstate);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_setcanceltype (int type,\n                                   int *oldtype);\n\nPTW32_DLLPORT void PTW32_CDECL pthread_testcancel (void);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_once (pthread_once_t * once_control,\n                          void (PTW32_CDECL *init_routine) (void));\n\n#if PTW32_LEVEL >= PTW32_LEVEL_MAX\nPTW32_DLLPORT ptw32_cleanup_t * PTW32_CDECL ptw32_pop_cleanup (int execute);\n\nPTW32_DLLPORT void PTW32_CDECL ptw32_push_cleanup (ptw32_cleanup_t * cleanup,\n                                 ptw32_cleanup_callback_t routine,\n                                 void *arg);\n#endif /* PTW32_LEVEL >= PTW32_LEVEL_MAX */\n\n/*\n * Thread Specific Data Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_key_create (pthread_key_t * key,\n                                void (PTW32_CDECL *destructor) (void *));\n\nPTW32_DLLPORT int PTW32_CDECL pthread_key_delete (pthread_key_t key);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_setspecific (pthread_key_t key,\n                                 const void *value);\n\nPTW32_DLLPORT void * PTW32_CDECL pthread_getspecific (pthread_key_t key);\n\n\n/*\n * Mutex Attribute Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_init (pthread_mutexattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_destroy (pthread_mutexattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_getpshared (const pthread_mutexattr_t\n                                          * attr,\n                                          int *pshared);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_setpshared (pthread_mutexattr_t * attr,\n                                          int pshared);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_settype (pthread_mutexattr_t * attr, int kind);\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_gettype (const pthread_mutexattr_t * attr, int *kind);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_setrobust(\n                                           pthread_mutexattr_t *attr,\n                                           int robust);\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_getrobust(\n           
                                const pthread_mutexattr_t * attr,\n                                           int * robust);\n\n/*\n * Barrier Attribute Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_barrierattr_init (pthread_barrierattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_barrierattr_destroy (pthread_barrierattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_barrierattr_getpshared (const pthread_barrierattr_t\n                                            * attr,\n                                            int *pshared);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_barrierattr_setpshared (pthread_barrierattr_t * attr,\n                                            int pshared);\n\n/*\n * Mutex Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_mutex_init (pthread_mutex_t * mutex,\n                                const pthread_mutexattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutex_destroy (pthread_mutex_t * mutex);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutex_lock (pthread_mutex_t * mutex);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutex_timedlock(pthread_mutex_t * mutex,\n                                    const struct timespec *abstime);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutex_trylock (pthread_mutex_t * mutex);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutex_unlock (pthread_mutex_t * mutex);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_mutex_consistent (pthread_mutex_t * mutex);\n\n/*\n * Spinlock Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_spin_init (pthread_spinlock_t * lock, int pshared);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_spin_destroy (pthread_spinlock_t * lock);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_spin_lock (pthread_spinlock_t * lock);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_spin_trylock (pthread_spinlock_t * lock);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_spin_unlock (pthread_spinlock_t * lock);\n\n/*\n * Barrier Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_barrier_init (pthread_barrier_t * barrier,\n                                  const pthread_barrierattr_t * attr,\n                                  unsigned int count);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_barrier_destroy (pthread_barrier_t * barrier);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_barrier_wait (pthread_barrier_t * barrier);\n\n/*\n * Condition Variable Attribute Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_condattr_init (pthread_condattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_condattr_destroy (pthread_condattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_condattr_getpshared (const pthread_condattr_t * attr,\n                                         int *pshared);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_condattr_setpshared (pthread_condattr_t * attr,\n                                         int pshared);\n\n/*\n * Condition Variable Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_cond_init (pthread_cond_t * cond,\n                               const pthread_condattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_cond_destroy (pthread_cond_t * cond);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_cond_wait (pthread_cond_t * cond,\n                               pthread_mutex_t * mutex);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_cond_timedwait (pthread_cond_t * cond,\n                                    pthread_mutex_t * mutex,\n                                    const struct timespec *abstime);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_cond_signal (pthread_cond_t * cond);\n\nPTW32_DLLPORT int PTW32_CDECL 
pthread_cond_broadcast (pthread_cond_t * cond);\n\n/*\n * Scheduling\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_setschedparam (pthread_t thread,\n                                   int policy,\n                                   const struct sched_param *param);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_getschedparam (pthread_t thread,\n                                   int *policy,\n                                   struct sched_param *param);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_setconcurrency (int);\n \nPTW32_DLLPORT int PTW32_CDECL pthread_getconcurrency (void);\n\n/*\n * Read-Write Lock Functions\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_init(pthread_rwlock_t *lock,\n                                const pthread_rwlockattr_t *attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_destroy(pthread_rwlock_t *lock);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_tryrdlock(pthread_rwlock_t *);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_trywrlock(pthread_rwlock_t *);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_rdlock(pthread_rwlock_t *lock);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_timedrdlock(pthread_rwlock_t *lock,\n                                       const struct timespec *abstime);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_wrlock(pthread_rwlock_t *lock);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_timedwrlock(pthread_rwlock_t *lock,\n                                       const struct timespec *abstime);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlock_unlock(pthread_rwlock_t *lock);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlockattr_init (pthread_rwlockattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlockattr_destroy (pthread_rwlockattr_t * attr);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlockattr_getpshared (const pthread_rwlockattr_t * attr,\n                                           int *pshared);\n\nPTW32_DLLPORT int PTW32_CDECL pthread_rwlockattr_setpshared (pthread_rwlockattr_t * attr,\n                                           int pshared);\n\n#if PTW32_LEVEL >= PTW32_LEVEL_MAX - 1\n\n/*\n * Signal Functions. 
Should be defined in <signal.h> but MSVC and MinGW32\n * already have signal.h that don't define these.\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_kill(pthread_t thread, int sig);\n\n/*\n * Non-portable functions\n */\n\n/*\n * Compatibility with Linux.\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_setkind_np(pthread_mutexattr_t * attr,\n                                         int kind);\nPTW32_DLLPORT int PTW32_CDECL pthread_mutexattr_getkind_np(pthread_mutexattr_t * attr,\n                                         int *kind);\n\n/*\n * Possibly supported by other POSIX threads implementations\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_delay_np (struct timespec * interval);\nPTW32_DLLPORT int PTW32_CDECL pthread_num_processors_np(void);\nPTW32_DLLPORT unsigned __int64 PTW32_CDECL pthread_getunique_np(pthread_t thread);\n\n/*\n * Useful if an application wants to statically link\n * the lib rather than load the DLL at run-time.\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_win32_process_attach_np(void);\nPTW32_DLLPORT int PTW32_CDECL pthread_win32_process_detach_np(void);\nPTW32_DLLPORT int PTW32_CDECL pthread_win32_thread_attach_np(void);\nPTW32_DLLPORT int PTW32_CDECL pthread_win32_thread_detach_np(void);\n\n/*\n * Features that are auto-detected at load/run time.\n */\nPTW32_DLLPORT int PTW32_CDECL pthread_win32_test_features_np(int);\nenum ptw32_features {\n  PTW32_SYSTEM_INTERLOCKED_COMPARE_EXCHANGE = 0x0001, /* System provides it. */\n  PTW32_ALERTABLE_ASYNC_CANCEL              = 0x0002  /* Can cancel blocked threads. */\n};\n\n/*\n * Register a system time change with the library.\n * Causes the library to perform various functions\n * in response to the change. Should be called whenever\n * the application's top level window receives a\n * WM_TIMECHANGE message. It can be passed directly to\n * pthread_create() as a new thread if desired.\n */\nPTW32_DLLPORT void * PTW32_CDECL pthread_timechange_handler_np(void *);\n\n#endif /*PTW32_LEVEL >= PTW32_LEVEL_MAX - 1 */\n\n#if PTW32_LEVEL >= PTW32_LEVEL_MAX\n\n/*\n * Returns the Win32 HANDLE for the POSIX thread.\n */\nPTW32_DLLPORT HANDLE PTW32_CDECL pthread_getw32threadhandle_np(pthread_t thread);\n/*\n * Returns the win32 thread ID for POSIX thread.\n */\nPTW32_DLLPORT DWORD PTW32_CDECL pthread_getw32threadid_np (pthread_t thread);\n\n\n/*\n * Protected Methods\n *\n * This function blocks until the given WIN32 handle\n * is signaled or pthread_cancel had been called.\n * This function allows the caller to hook into the\n * PThreads cancel mechanism. It is implemented using\n *\n *              WaitForMultipleObjects\n *\n * on 'waitHandle' and a manually reset WIN32 Event\n * used to implement pthread_cancel. 
The 'timeout'\n * argument to TimedWait is simply passed to\n * WaitForMultipleObjects.\n */\nPTW32_DLLPORT int PTW32_CDECL pthreadCancelableWait (HANDLE waitHandle);\nPTW32_DLLPORT int PTW32_CDECL pthreadCancelableTimedWait (HANDLE waitHandle,\n                                        DWORD timeout);\n\n#endif /* PTW32_LEVEL >= PTW32_LEVEL_MAX */\n\n/*\n * Thread-Safe C Runtime Library Mappings.\n */\n#if !defined(_UWIN)\n#  if defined(NEED_ERRNO)\n     PTW32_DLLPORT int * PTW32_CDECL _errno( void );\n#  else\n#    if !defined(errno)\n#      if (defined(_MT) || defined(_DLL))\n         __declspec(dllimport) extern int * __cdecl _errno(void);\n#        define errno   (*_errno())\n#      endif\n#    endif\n#  endif\n#endif\n\n/*\n * Some compiler environments don't define some things.\n */\n#if defined(__BORLANDC__)\n#  define _ftime ftime\n#  define _timeb timeb\n#endif\n\n#if defined(__cplusplus)\n\n/*\n * Internal exceptions\n */\nclass ptw32_exception {};\nclass ptw32_exception_cancel : public ptw32_exception {};\nclass ptw32_exception_exit   : public ptw32_exception {};\n\n#endif\n\n#if PTW32_LEVEL >= PTW32_LEVEL_MAX\n\n/* FIXME: This is only required if the library was built using SEH */\n/*\n * Get internal SEH tag\n */\nPTW32_DLLPORT DWORD PTW32_CDECL ptw32_get_exception_services_code(void);\n\n#endif /* PTW32_LEVEL >= PTW32_LEVEL_MAX */\n\n#if !defined(PTW32_BUILD)\n\n#if defined(__CLEANUP_SEH)\n\n/*\n * Redefine the SEH __except keyword to ensure that applications\n * propagate our internal exceptions up to the library's internal handlers.\n */\n#define __except( E ) \\\n        __except( ( GetExceptionCode() == ptw32_get_exception_services_code() ) \\\n                 ? EXCEPTION_CONTINUE_SEARCH : ( E ) )\n\n#endif /* __CLEANUP_SEH */\n\n#if defined(__CLEANUP_CXX)\n\n/*\n * Redefine the C++ catch keyword to ensure that applications\n * propagate our internal exceptions up to the library's internal handlers.\n */\n#if defined(_MSC_VER)\n        /*\n         * WARNING: Replace any 'catch( ... )' with 'PtW32CatchAll'\n         * if you want Pthread-Win32 cancelation and pthread_exit to work.\n         */\n\n#if !defined(PtW32NoCatchWarn)\n\n#pragma message(\"Specify \\\"/DPtW32NoCatchWarn\\\" compiler flag to skip this message.\")\n#pragma message(\"------------------------------------------------------------------\")\n#pragma message(\"When compiling applications with MSVC++ and C++ exception handling:\")\n#pragma message(\"  Replace any 'catch( ... )' in routines called from POSIX threads\")\n#pragma message(\"  with 'PtW32CatchAll' or 'CATCHALL' if you want POSIX thread\")\n#pragma message(\"  cancelation and pthread_exit to work. For example:\")\n#pragma message(\"\")\n#pragma message(\"    #if defined(PtW32CatchAll)\")\n#pragma message(\"      PtW32CatchAll\")\n#pragma message(\"    #else\")\n#pragma message(\"      catch(...)\")\n#pragma message(\"    #endif\")\n#pragma message(\"        {\")\n#pragma message(\"          /* Catchall block processing */\")\n#pragma message(\"        }\")\n#pragma message(\"------------------------------------------------------------------\")\n\n#endif\n\n#define PtW32CatchAll \\\n        catch( ptw32_exception & ) { throw; } \\\n        catch( ... )\n\n#else /* _MSC_VER */\n\n#define catch( E ) \\\n        catch( ptw32_exception & ) { throw; } \\\n        catch( E )\n\n#endif /* _MSC_VER */\n\n#endif /* __CLEANUP_CXX */\n\n#endif /* ! 
PTW32_BUILD */\n\n#if defined(__cplusplus)\n}                               /* End of extern \"C\" */\n#endif                          /* __cplusplus */\n\n#if defined(PTW32__HANDLE_DEF)\n# undef HANDLE\n#endif\n#if defined(PTW32__DWORD_DEF)\n# undef DWORD\n#endif\n\n#undef PTW32_LEVEL\n#undef PTW32_LEVEL_MAX\n\n#endif /* ! RC_INVOKED */\n\n#endif /* PTHREAD_H */\n"
  },
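  {
    "path": "examples/pthread_usage_example.cpp",
    "content": "// examples/pthread_usage_example.cpp\n// NOTE: Hypothetical example added for illustration only; it is not part\n// of the upstream cvdrone sources. A minimal sketch of the pthreads-win32\n// API declared in src/3rdparty/pthread/include/pthread.h: pthread_create(),\n// pthread_mutex_lock()/pthread_mutex_unlock() and pthread_join().\n\n#include <pthread.h>\n#include <stdio.h>\n\n// Static mutex initialisation uses PTHREAD_MUTEX_INITIALIZER from pthread.h.\nstatic pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;\nstatic int counter = 0;\n\n// Thread entry point. On Win32 a plain function defaults to __cdecl, which\n// matches the PTW32_CDECL calling convention the header expects.\nstatic void *worker(void *arg)\n{\n    for (int i = 0; i < 1000; i++) {\n        pthread_mutex_lock(&counter_mutex);\n        counter++;\n        pthread_mutex_unlock(&counter_mutex);\n    }\n    return arg;\n}\n\nint main(void)\n{\n    pthread_t threads[4];\n\n    // NULL attributes create a joinable thread (PTHREAD_CREATE_JOINABLE).\n    for (int i = 0; i < 4; i++) {\n        if (pthread_create(&threads[i], NULL, worker, NULL) != 0) {\n            fprintf(stderr, \"pthread_create failed\\n\");\n            return 1;\n        }\n    }\n    for (int i = 0; i < 4; i++) {\n        pthread_join(threads[i], NULL);\n    }\n\n    printf(\"counter = %d (expected 4000)\\n\", counter);\n    return 0;\n}\n"
  },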
  {
    "path": "src/3rdparty/pthread/include/sched.h",
    "content": "/*\n * Module: sched.h\n *\n * Purpose:\n *      Provides an implementation of POSIX realtime extensions\n *      as defined in \n *\n *              POSIX 1003.1b-1993      (POSIX.1b)\n *\n * --------------------------------------------------------------------------\n *\n *      Pthreads-win32 - POSIX Threads Library for Win32\n *      Copyright(C) 1998 John E. Bossom\n *      Copyright(C) 1999,2005 Pthreads-win32 contributors\n * \n *      Contact Email: rpj@callisto.canberra.edu.au\n * \n *      The current list of contributors is contained\n *      in the file CONTRIBUTORS included with the source\n *      code distribution. The list can also be seen at the\n *      following World Wide Web location:\n *      http://sources.redhat.com/pthreads-win32/contributors.html\n * \n *      This library is free software; you can redistribute it and/or\n *      modify it under the terms of the GNU Lesser General Public\n *      License as published by the Free Software Foundation; either\n *      version 2 of the License, or (at your option) any later version.\n * \n *      This library is distributed in the hope that it will be useful,\n *      but WITHOUT ANY WARRANTY; without even the implied warranty of\n *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n *      Lesser General Public License for more details.\n * \n *      You should have received a copy of the GNU Lesser General Public\n *      License along with this library in the file COPYING.LIB;\n *      if not, write to the Free Software Foundation, Inc.,\n *      59 Temple Place - Suite 330, Boston, MA 02111-1307, USA\n */\n#if !defined(_SCHED_H)\n#define _SCHED_H\n\n#undef PTW32_SCHED_LEVEL\n\n#if defined(_POSIX_SOURCE)\n#define PTW32_SCHED_LEVEL 0\n/* Early POSIX */\n#endif\n\n#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309\n#undef PTW32_SCHED_LEVEL\n#define PTW32_SCHED_LEVEL 1\n/* Include 1b, 1c and 1d */\n#endif\n\n#if defined(INCLUDE_NP)\n#undef PTW32_SCHED_LEVEL\n#define PTW32_SCHED_LEVEL 2\n/* Include Non-Portable extensions */\n#endif\n\n#define PTW32_SCHED_LEVEL_MAX 3\n\n#if ( defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200112 )  || !defined(PTW32_SCHED_LEVEL)\n#define PTW32_SCHED_LEVEL PTW32_SCHED_LEVEL_MAX\n/* Include everything */\n#endif\n\n\n#if defined(__GNUC__) && !defined(__declspec)\n# error Please upgrade your GNU compiler to one that supports __declspec.\n#endif\n\n/*\n * When building the library, you should define PTW32_BUILD so that\n * the variables/functions are exported correctly. 
When using the library,\n * do NOT define PTW32_BUILD, and then the variables/functions will\n * be imported correctly.\n */\n#if !defined(PTW32_STATIC_LIB)\n#  if defined(PTW32_BUILD)\n#    define PTW32_DLLPORT __declspec (dllexport)\n#  else\n#    define PTW32_DLLPORT __declspec (dllimport)\n#  endif\n#else\n#  define PTW32_DLLPORT\n#endif\n\n/*\n * This is a duplicate of what is in the autoconf config.h,\n * which is only used when building the pthread-win32 libraries.\n */\n\n#if !defined(PTW32_CONFIG_H)\n#  if defined(WINCE)\n#    define NEED_ERRNO\n#    define NEED_SEM\n#  endif\n#  if defined(__MINGW64__)\n#    define HAVE_STRUCT_TIMESPEC\n#    define HAVE_MODE_T\n#  elif defined(_UWIN) || defined(__MINGW32__)\n#    define HAVE_MODE_T\n#  endif\n#endif\n\n/*\n *\n */\n\n#if PTW32_SCHED_LEVEL >= PTW32_SCHED_LEVEL_MAX\n#if defined(NEED_ERRNO)\n#include \"need_errno.h\"\n#else\n#include <errno.h>\n#endif\n#endif /* PTW32_SCHED_LEVEL >= PTW32_SCHED_LEVEL_MAX */\n\n#if (defined(__MINGW64__) || defined(__MINGW32__)) || defined(_UWIN)\n# if PTW32_SCHED_LEVEL >= PTW32_SCHED_LEVEL_MAX\n/* For pid_t */\n#  include <sys/types.h>\n/* Required by Unix 98 */\n#  include <time.h>\n# else\n   typedef int pid_t;\n# endif\n#else\n typedef int pid_t;\n#endif\n\n/* Thread scheduling policies */\n\nenum {\n  SCHED_OTHER = 0,\n  SCHED_FIFO,\n  SCHED_RR,\n  SCHED_MIN   = SCHED_OTHER,\n  SCHED_MAX   = SCHED_RR\n};\n\nstruct sched_param {\n  int sched_priority;\n};\n\n#if defined(__cplusplus)\nextern \"C\"\n{\n#endif                          /* __cplusplus */\n\nPTW32_DLLPORT int __cdecl sched_yield (void);\n\nPTW32_DLLPORT int __cdecl sched_get_priority_min (int policy);\n\nPTW32_DLLPORT int __cdecl sched_get_priority_max (int policy);\n\nPTW32_DLLPORT int __cdecl sched_setscheduler (pid_t pid, int policy);\n\nPTW32_DLLPORT int __cdecl sched_getscheduler (pid_t pid);\n\n/*\n * Note that this macro returns ENOTSUP rather than\n * ENOSYS as might be expected. However, returning ENOSYS\n * should mean that sched_get_priority_{min,max} are\n * not implemented as well as sched_rr_get_interval.\n * This is not the case, since we just don't support\n * round-robin scheduling. Therefore I have chosen to\n * return the same value as sched_setscheduler when\n * SCHED_RR is passed to it.\n */\n#define sched_rr_get_interval(_pid, _interval) \\\n  ( errno = ENOTSUP, (int) -1 )\n\n\n#if defined(__cplusplus)\n}                               /* End of extern \"C\" */\n#endif                          /* __cplusplus */\n\n#undef PTW32_SCHED_LEVEL\n#undef PTW32_SCHED_LEVEL_MAX\n\n#endif                          /* !_SCHED_H */\n\n"
  },
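  {
    "path": "examples/sched_priority_example.cpp",
    "content": "// examples/sched_priority_example.cpp\n// NOTE: Hypothetical example added for illustration only; not part of the\n// upstream cvdrone sources. Queries the priority range reported by the\n// pthreads-win32 sched.h shim for each scheduling policy, then yields the\n// processor with sched_yield().\n\n#include <sched.h>\n#include <stdio.h>\n\nint main(void)\n{\n    // SCHED_OTHER, SCHED_FIFO and SCHED_RR come from the enum in sched.h.\n    const int   policies[] = { SCHED_OTHER, SCHED_FIFO, SCHED_RR };\n    const char *names[]    = { \"SCHED_OTHER\", \"SCHED_FIFO\", \"SCHED_RR\" };\n\n    for (int i = 0; i < 3; i++) {\n        int lo = sched_get_priority_min(policies[i]);\n        int hi = sched_get_priority_max(policies[i]);\n        printf(\"%s: priority range %d..%d\\n\", names[i], lo, hi);\n    }\n\n    // sched_rr_get_interval() is deliberately a macro here that sets errno\n    // to ENOTSUP, as the comment in sched.h explains.\n    sched_yield();   /* give up the remainder of this time slice */\n    return 0;\n}\n"
  },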
  {
    "path": "src/3rdparty/pthread/include/semaphore.h",
    "content": "/*\n * Module: semaphore.h\n *\n * Purpose:\n *\tSemaphores aren't actually part of the PThreads standard.\n *\tThey are defined by the POSIX Standard:\n *\n *\t\tPOSIX 1003.1b-1993\t(POSIX.1b)\n *\n * --------------------------------------------------------------------------\n *\n *      Pthreads-win32 - POSIX Threads Library for Win32\n *      Copyright(C) 1998 John E. Bossom\n *      Copyright(C) 1999,2005 Pthreads-win32 contributors\n * \n *      Contact Email: rpj@callisto.canberra.edu.au\n * \n *      The current list of contributors is contained\n *      in the file CONTRIBUTORS included with the source\n *      code distribution. The list can also be seen at the\n *      following World Wide Web location:\n *      http://sources.redhat.com/pthreads-win32/contributors.html\n * \n *      This library is free software; you can redistribute it and/or\n *      modify it under the terms of the GNU Lesser General Public\n *      License as published by the Free Software Foundation; either\n *      version 2 of the License, or (at your option) any later version.\n * \n *      This library is distributed in the hope that it will be useful,\n *      but WITHOUT ANY WARRANTY; without even the implied warranty of\n *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n *      Lesser General Public License for more details.\n * \n *      You should have received a copy of the GNU Lesser General Public\n *      License along with this library in the file COPYING.LIB;\n *      if not, write to the Free Software Foundation, Inc.,\n *      59 Temple Place - Suite 330, Boston, MA 02111-1307, USA\n */\n#if !defined( SEMAPHORE_H )\n#define SEMAPHORE_H\n\n#undef PTW32_SEMAPHORE_LEVEL\n\n#if defined(_POSIX_SOURCE)\n#define PTW32_SEMAPHORE_LEVEL 0\n/* Early POSIX */\n#endif\n\n#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309\n#undef PTW32_SEMAPHORE_LEVEL\n#define PTW32_SEMAPHORE_LEVEL 1\n/* Include 1b, 1c and 1d */\n#endif\n\n#if defined(INCLUDE_NP)\n#undef PTW32_SEMAPHORE_LEVEL\n#define PTW32_SEMAPHORE_LEVEL 2\n/* Include Non-Portable extensions */\n#endif\n\n#define PTW32_SEMAPHORE_LEVEL_MAX 3\n\n#if !defined(PTW32_SEMAPHORE_LEVEL)\n#define PTW32_SEMAPHORE_LEVEL PTW32_SEMAPHORE_LEVEL_MAX\n/* Include everything */\n#endif\n\n#if defined(__GNUC__) && ! defined (__declspec)\n# error Please upgrade your GNU compiler to one that supports __declspec.\n#endif\n\n/*\n * When building the library, you should define PTW32_BUILD so that\n * the variables/functions are exported correctly. 
When using the library,\n * do NOT define PTW32_BUILD, and then the variables/functions will\n * be imported correctly.\n */\n#if !defined(PTW32_STATIC_LIB)\n#  if defined(PTW32_BUILD)\n#    define PTW32_DLLPORT __declspec (dllexport)\n#  else\n#    define PTW32_DLLPORT __declspec (dllimport)\n#  endif\n#else\n#  define PTW32_DLLPORT\n#endif\n\n/*\n * This is a duplicate of what is in the autoconf config.h,\n * which is only used when building the pthread-win32 libraries.\n */\n\n#if !defined(PTW32_CONFIG_H)\n#  if defined(WINCE)\n#    define NEED_ERRNO\n#    define NEED_SEM\n#  endif\n#  if defined(__MINGW64__)\n#    define HAVE_STRUCT_TIMESPEC\n#    define HAVE_MODE_T\n#  elif defined(_UWIN) || defined(__MINGW32__)\n#    define HAVE_MODE_T\n#  endif\n#endif\n\n/*\n *\n */\n\n#if PTW32_SEMAPHORE_LEVEL >= PTW32_SEMAPHORE_LEVEL_MAX\n#if defined(NEED_ERRNO)\n#include \"need_errno.h\"\n#else\n#include <errno.h>\n#endif\n#endif /* PTW32_SEMAPHORE_LEVEL >= PTW32_SEMAPHORE_LEVEL_MAX */\n\n#define _POSIX_SEMAPHORES\n\n#if defined(__cplusplus)\nextern \"C\"\n{\n#endif\t\t\t\t/* __cplusplus */\n\n#if !defined(HAVE_MODE_T)\ntypedef unsigned int mode_t;\n#endif\n\n\ntypedef struct sem_t_ * sem_t;\n\nPTW32_DLLPORT int __cdecl sem_init (sem_t * sem,\n\t\t\t    int pshared,\n\t\t\t    unsigned int value);\n\nPTW32_DLLPORT int __cdecl sem_destroy (sem_t * sem);\n\nPTW32_DLLPORT int __cdecl sem_trywait (sem_t * sem);\n\nPTW32_DLLPORT int __cdecl sem_wait (sem_t * sem);\n\nPTW32_DLLPORT int __cdecl sem_timedwait (sem_t * sem,\n\t\t\t\t const struct timespec * abstime);\n\nPTW32_DLLPORT int __cdecl sem_post (sem_t * sem);\n\nPTW32_DLLPORT int __cdecl sem_post_multiple (sem_t * sem,\n\t\t\t\t     int count);\n\nPTW32_DLLPORT int __cdecl sem_open (const char * name,\n\t\t\t    int oflag,\n\t\t\t    mode_t mode,\n\t\t\t    unsigned int value);\n\nPTW32_DLLPORT int __cdecl sem_close (sem_t * sem);\n\nPTW32_DLLPORT int __cdecl sem_unlink (const char * name);\n\nPTW32_DLLPORT int __cdecl sem_getvalue (sem_t * sem,\n\t\t\t\tint * sval);\n\n#if defined(__cplusplus)\n}\t\t\t\t/* End of extern \"C\" */\n#endif\t\t\t\t/* __cplusplus */\n\n#undef PTW32_SEMAPHORE_LEVEL\n#undef PTW32_SEMAPHORE_LEVEL_MAX\n\n#endif\t\t\t\t/* !SEMAPHORE_H */\n"
  },
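  {
    "path": "examples/semaphore_example.cpp",
    "content": "// examples/semaphore_example.cpp\n// NOTE: Hypothetical example added for illustration only; not part of the\n// upstream cvdrone sources. A minimal counting-semaphore handshake using\n// the sem_* functions declared in src/3rdparty/pthread/include/semaphore.h.\n\n#include <pthread.h>\n#include <semaphore.h>\n#include <stdio.h>\n\nstatic sem_t items;   // counts items produced but not yet consumed\n\nstatic void *producer(void *arg)\n{\n    for (int i = 0; i < 5; i++) {\n        /* ... produce one item here ... */\n        sem_post(&items);           // signal that one item is available\n    }\n    return arg;\n}\n\nint main(void)\n{\n    // pshared = 0 keeps the semaphore private to this process\n    // (PTHREAD_PROCESS_PRIVATE); the initial count is 0.\n    if (sem_init(&items, 0, 0) != 0) return 1;\n\n    pthread_t tid;\n    pthread_create(&tid, NULL, producer, NULL);\n\n    for (int i = 0; i < 5; i++) {\n        sem_wait(&items);           // block until the producer posts\n        printf(\"consumed item %d\\n\", i);\n    }\n\n    pthread_join(tid, NULL);\n    sem_destroy(&items);\n    return 0;\n}\n"
  },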
  {
    "path": "src/ardrone/ardrone.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   ardrone.cpp\n//! @brief  A source file of AR.Drone class\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n\n// --------------------------------------------------------------------------\n//! @brief   Constructor of AR.Drone class\n//! @param   ardrone_addr IP address of AR.Drone\n//! @return  None\n// --------------------------------------------------------------------------\nARDrone::ARDrone(const char *ardrone_addr)\n{\n    // IP Address\n    strncpy(ip, ARDRONE_DEFAULT_ADDR, 16);\n\n    // Sequence number\n    seq = 0;\n\n    // Camera image\n    img = NULL;\n\n    // Version information\n    memset(&version, 0, sizeof(version));\n\n    // Navdata\n    memset(&navdata, 0, sizeof(navdata));\n\n    // Configurations\n    memset(&config, 0, sizeof(config));\n\n    // Video\n    pFormatCtx  = NULL;\n    pCodecCtx   = NULL;\n    pFrame      = NULL;\n    pFrameBGR   = NULL;\n    bufferBGR   = NULL;\n    pConvertCtx = NULL;\n\n    // Thread for AT command\n    threadCommand = NULL;\n    mutexCommand  = NULL;\n\n    // Thread for Navdata\n    threadNavdata = NULL;\n    mutexNavdata  = NULL;\n\n    // Thread for Video\n    threadVideo = NULL;\n    mutexVideo  = NULL;\n\n    // Open if the IP address was specified\n    if (ardrone_addr != NULL) {\n        open(ardrone_addr);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Destructor of ARDrone class\n//! @return  None\n// --------------------------------------------------------------------------\nARDrone::~ARDrone()\n{\n    // See you\n    close();\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Initialize the AR.Drone.\n//! @param   ardrone_addr IP address of AR.Drone\n//! @return  Result of initialization\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::open(const char *ardrone_addr)\n{\n    // Initialize FFmpeg\n    av_register_all();\n    avformat_network_init();\n    av_log_set_level(AV_LOG_QUIET);\n\n    // Save IP address\n    strncpy(ip, ardrone_addr, 16);\n\n    // Get version information\n    if (!getVersionInfo()) return 0;\n    std::cout << \"AR.Drone Ver. 
\" << version.major << \".\" << version.minor << \".\" << version.revision << \".\" << std::endl;\n\n    // Initialize AT command\n    if (!initCommand()) return 0;\n\n    // Blink LEDs\n    setLED(ARDRONE_LED_ANIM_BLINK_GREEN);\n\n    // Initialize Navdata\n    if (!initNavdata()) return 0;\n\n    // Initialize Video\n    if (!initVideo()) return 0;\n\n    // Wait for updating the status\n    //msleep(500);\n\n    // Get configurations\n    if (!getConfig()) return 0;\n\n    // Stop LED animation\n    setLED(ARDRONE_LED_ANIM_STANDARD);\n\n    // Reset emergency\n    resetWatchDog();\n    resetEmergency();\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Update the information of the AR.Drone.\n//! @return  Result of update\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::update(void)\n{\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Finalize the AR.Drone class.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::close(void)\n{\n    // Stop AR.Drone\n    if (!onGround()) landing();\n\n    // Stop LED animation\n    setLED(ARDRONE_LED_ANIM_STANDARD);\n\n    // Finalize video\n    finalizeVideo();\n\n    // Finalize Navdata\n    finalizeNavdata();\n\n    // Finalize AT command\n    finalizeCommand();\n}\n"
  },
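  {
    "path": "examples/ardrone_connect_example.cpp",
    "content": "// examples/ardrone_connect_example.cpp\n// NOTE: Hypothetical example added for illustration only; not part of the\n// upstream cvdrone sources. It sketches the connection life cycle implied\n// by ARDrone::open()/update()/close() in src/ardrone/ardrone.cpp; anything\n// beyond those members is an assumption about the full class interface.\n\n#include \"ardrone/ardrone.h\"\n\nint main(void)\n{\n    // Passing NULL skips the constructor's auto-connect, since ardrone.cpp\n    // only calls open() when an address is specified.\n    ARDrone ardrone(NULL);\n\n    // open() fetches version info, then initialises the AT command,\n    // Navdata and video channels; it returns 1 on success, 0 on failure.\n    if (!ardrone.open(\"192.168.1.1\")) {   // ARDRONE_DEFAULT_ADDR\n        return -1;\n    }\n\n    // Poll while connected; update() is the per-cycle refresh hook and\n    // also reports 1 on success, 0 on failure.\n    for (int i = 0; i < 100; i++) {\n        if (!ardrone.update()) break;\n    }\n\n    // close() lands the drone if it is airborne, restores the LEDs and\n    // finalises video, Navdata and AT-command handling.\n    ardrone.close();\n    return 0;\n}\n"
  },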
  {
    "path": "src/ardrone/ardrone.h",
    "content": "#ifndef __HEADER_ARDRONE_LIB__\n#define __HEADER_ARDRONE_LIB__\n\n// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file     ardrone.h\n//! @brief    A header file of AR.Drone class\n//\n//! @mainpage Document of CV Drone\n//!           Project home: https://github.com/puku0x/cvdrone      <br>\n//!           Project Wiki: https://github.com/puku0x/cvdrone/wiki <br>\n//!           Copyright(C) 2014 puku0x                             <br>\n//\n// -------------------------------------------------------------------------\n\n// Coordinate system\n//   Front of the AR.Drone is X-axis, left is Y-axis, upper is Z-axis.\n//   Also front is 0.0 [rad], each axis CCW is positive.\n//            X\n//           +^-\n//            |\n//            |\n//    Y <-----+ (0,0)\n//            Z\n//\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <stdarg.h>\n#include <math.h>\n\n// OpenCV 1.0\n//#include <opencv/cv.h>\n//#include <opencv/highgui.h>\n\n// OpenCV 2.0\n#include <opencv2/opencv.hpp>\n\n// FFmpeg\nextern \"C\" {\n    #include <libavcodec/avcodec.h>\n    #include <libavformat/avformat.h>\n    #include <libswscale/swscale.h>\n}\n\n// POSIX threads\n#include <pthread.h>\n\n// Win32 <-> GCC\n#ifdef _WIN32\n#include <windows.h>\n#include <winsock.h>\n#define socklen_t int\n#define msleep(ms) Sleep((DWORD)ms)\n#else\n#include <errno.h>\n#include <fcntl.h>\n#include <sys/types.h>\n#include <sys/socket.h>\n#include <netinet/in.h>\n#include <arpa/inet.h>\n#include <unistd.h>\ntypedef int SOCKET;\n#define INVALID_SOCKET (-1)\n#define SOCKET_ERROR   (-1)\ninline void msleep(unsigned long ms) {\n    while (ms--) usleep(1000);\n}\n#endif\n\n// Macro definitions\n#define ARDRONE_VERSION_1           (1)             // AR.Drone 1.0\n#define ARDRONE_VERSION_2           (2)             // AR.Drone 2.0\n#define ARDRONE_SESSION_ID          \"d2e081a3\"      // SessionID\n#define ARDRONE_PROFILE_ID          \"be27e2e4\"      // Profile ID\n#define ARDRONE_APPLOCATION_ID      \"d87f7e0c\"      // Application ID\n#define ARDRONE_FTP_PORT            (5551)          // Port number for FTP\n#define ARDRONE_AUTH_PORT           (5552)\n#define ARDRONE_VIDEO_RECORDER_PORT (5553)\n#define ARDRONE_NAVDATA_PORT        (5554)          // Port for Navdata\n#define ARDRONE_VIDEO_PORT          (5555)          // Port for Video\n#define ARDRONE_AT_PORT             (5556)          // Port for AT command\n#define ARDRONE_RAW_CAPTURE_PORT    (5557)\n#define ARDRONE_PRINTF_PORT         (5558)\n#define 
ARDRONE_CONTROL_PORT        (5559)          // Port for configuration\n#define ARDRONE_DEFAULT_ADDR        \"192.168.1.1\"   // Default IP address of AR.Drone\n#define ARDRONE_NAVDATA_HEADER      (0x55667788)    // Header of Navdata\n\n// Math definitions\n#ifndef NULL\n#define NULL (0)\n#endif\n#ifndef M_PI\n#define M_PI (3.14159265358979323846264338327)\n#endif\n#ifndef RAD_TO_DEG\n#define RAD_TO_DEG (180/M_PI)\n#endif\n#ifndef DEG_TO_RAD\n#define DEG_TO_RAD (M_PI/180)\n#endif\n#ifndef MIN\n#define MIN(a, b)  ((a) > (b) ? (b) : (a))\n#endif\n#ifndef MAX\n#define MAX(a, b)  ((a) < (b) ? (b) : (a))\n#endif\n\n// Virtual keys\n#ifdef _WIN32\n#ifndef CV_VK_UP\n#define CV_VK_UP (VK_UP<<16)\n#endif\n#ifndef CV_VK_DOWN\n#define CV_VK_DOWN (VK_DOWN<<16)\n#endif\n#ifndef CV_VK_LEFT\n#define CV_VK_LEFT (VK_LEFT<<16)\n#endif\n#ifndef CV_VK_RIGHT\n#define CV_VK_RIGHT (VK_RIGHT<<16)\n#endif\n#else\n#if defined(__APPLE__)\n#ifndef CV_VK_UP\n#define CV_VK_UP (0xf700)\n#endif\n#ifndef CV_VK_DOWN\n#define CV_VK_DOWN (0xf701)\n#endif\n#ifndef CV_VK_LEFT\n#define CV_VK_LEFT (0xf702)\n#endif\n#ifndef CV_VK_RIGHT\n#define CV_VK_RIGHT (0xf703)\n#endif\n#else\n#ifndef CV_VK_UP\n#define CV_VK_UP (0xff52)\n#endif\n#ifndef CV_VK_DOWN\n#define CV_VK_DOWN (0xff54)\n#endif\n#ifndef CV_VK_LEFT\n#define CV_VK_LEFT (0xff51)\n#endif\n#ifndef CV_VK_RIGHT\n#define CV_VK_RIGHT (0xff53)\n#endif\n#endif\n#endif\n\n// State masks\nenum ARDRONE_STATE_MASK {\n    ARDRONE_FLY_MASK            = 1U <<  0, // FLY MASK                  : (0) Ardrone is landed, (1) Ardrone is flying\n    ARDRONE_VIDEO_MASK          = 1U <<  1, // VIDEO MASK                : (0) Video disable, (1) Video enable\n    ARDRONE_VISION_MASK         = 1U <<  2, // VISION MASK               : (0) Vision disable, (1) Vision enable\n    ARDRONE_CONTROL_MASK        = 1U <<  3, // CONTROL ALGO              : (0) Euler angles control, (1) Angular speed control\n    ARDRONE_ALTITUDE_MASK       = 1U <<  4, // ALTITUDE CONTROL ALGO     : (0) Altitude control inactive (1) Altitude control active\n    ARDRONE_USER_FEEDBACK_START = 1U <<  5, // USER feedback             :     Start button state \n    ARDRONE_COMMAND_MASK        = 1U <<  6, // Control command ACK       : (0) None, (1) One received\n    ARDRONE_CAMERA_MASK         = 1U <<  7, // CAMERA MASK               : (0) Camera not ready, (1) Camera ready\n    ARDRONE_TRAVELLING_MASK     = 1U <<  8, // Travelling mask           : (0) Disable, (1) Enable\n    ARDRONE_USB_MASK            = 1U <<  9, // USB key                   : (0) Usb key not ready, (1) Usb key ready\n    ARDRONE_NAVDATA_DEMO_MASK   = 1U << 10, // Navdata demo              : (0) All navdata, (1) Only navdata demo\n    ARDRONE_NAVDATA_BOOTSTRAP   = 1U << 11, // Navdata bootstrap         : (0) Options sent in all or demo mode, (1) No navdata options sent\n    ARDRONE_MOTORS_MASK         = 1U << 12, // Motors status             : (0) Ok, (1) Motors problem\n    ARDRONE_COM_LOST_MASK       = 1U << 13, // Communication Lost        : (1) Com problem, (0) Com is ok\n    ARDRONE_VBAT_LOW            = 1U << 15, // VBat low                  : (1) Too low, (0) Ok\n    ARDRONE_USER_EL             = 1U << 16, // User Emergency Landing    : (1) User EL is ON, (0) User EL is OFF\n    ARDRONE_TIMER_ELAPSED       = 1U << 17, // Timer elapsed             : (1) Elapsed, (0) Not elapsed\n    ARDRONE_ANGLES_OUT_OF_RANGE = 1U << 19, // Angles                    : (0) Ok, (1) Out of range\n    ARDRONE_ULTRASOUND_MASK     = 1U << 21, // Ultrasonic sensor         : (0) Ok, 
(1) Deaf\n    ARDRONE_CUTOUT_MASK         = 1U << 22, // Cutout system detection   : (0) Not detected, (1) Detected\n    ARDRONE_PIC_VERSION_MASK    = 1U << 23, // PIC Version number OK     : (0) A bad version number, (1) Version number is OK\n    ARDRONE_ATCODEC_THREAD_ON   = 1U << 24, // ATCodec thread ON         : (0) Thread OFF (1) thread ON\n    ARDRONE_NAVDATA_THREAD_ON   = 1U << 25, // Navdata thread ON         : (0) Thread OFF (1) thread ON\n    ARDRONE_VIDEO_THREAD_ON     = 1U << 26, // Video thread ON           : (0) Thread OFF (1) thread ON\n    ARDRONE_ACQ_THREAD_ON       = 1U << 27, // Acquisition thread ON     : (0) Thread OFF (1) thread ON\n    ARDRONE_CTRL_WATCHDOG_MASK  = 1U << 28, // CTRL watchdog             : (1) Delay in control execution (> 5ms), (0) Control is well scheduled\n    ARDRONE_ADC_WATCHDOG_MASK   = 1U << 29, // ADC Watchdog              : (1) Delay in uart2 dsr (> 5ms), (0) Uart2 is good\n    ARDRONE_COM_WATCHDOG_MASK   = 1U << 30, // Communication Watchdog    : (1) Com problem, (0) Com is ok\n    ARDRONE_EMERGENCY_MASK      = 1U << 31  // Emergency landing         : (0) No emergency, (1) Emergency\n};\n\n// Navdata tags\nenum ARDRONE_NAVDATA_TAG {\n    ARDRONE_NAVDATA_DEMO_TAG            =  0,\n    ARDRONE_NAVDATA_TIME_TAG            =  1,\n    ARDRONE_NAVDATA_RAW_MEASURES_TAG    =  2,\n    ARDRONE_NAVDATA_PHYS_MEASURES_TAG   =  3,\n    ARDRONE_NAVDATA_GYROS_OFFSETS_TAG   =  4,\n    ARDRONE_NAVDATA_EULER_ANGLES_TAG    =  5,\n    ARDRONE_NAVDATA_REFERENCES_TAG      =  6,\n    ARDRONE_NAVDATA_TRIMS_TAG           =  7,\n    ARDRONE_NAVDATA_RC_REFERENCES_TAG   =  8,\n    ARDRONE_NAVDATA_PWM_TAG             =  9,\n    ARDRONE_NAVDATA_ALTITUDE_TAG        = 10,\n    ARDRONE_NAVDATA_VISION_RAW_TAG      = 11,\n    ARDRONE_NAVDATA_VISION_OF_TAG       = 12,\n    ARDRONE_NAVDATA_VISION_TAG          = 13,\n    ARDRONE_NAVDATA_VISION_PERF_TAG     = 14,\n    ARDRONE_NAVDATA_TRACKERS_SEND_TAG   = 15,\n    ARDRONE_NAVDATA_VISION_DETECT_TAG   = 16,\n    ARDRONE_NAVDATA_WATCHDOG_TAG        = 17,\n    ARDRONE_NAVDATA_IPHONE_ANGLES_TAG   = 18,\n    ARDRONE_NAVDATA_ADC_DATA_FRAME_TAG  = 18,\n    ARDRONE_NAVDATA_VIDEO_STREAM_TAG    = 19,\n    ARDRONE_NAVDATA_GAME_TAG            = 20,       // AR.Drone 1.7.4\n    ARDRONE_NAVDATA_PRESSURE_RAW_TAG    = 21,       // AR.Drone 2.0\n    ARDRONE_NAVDATA_MAGNETO_TAG         = 22,       // AR.Drone 2.0\n    ARDRONE_NAVDATA_WIND_TAG            = 23,       // AR.Drone 2.0\n    ARDRONE_NAVDATA_KALMAN_PRESSURE_TAG = 24,       // AR.Drone 2.0\n    ARDRONE_NAVDATA_HDVIDEO_STREAM_TAG  = 25,       // AR.Drone 2.0\n    ARDRONE_NAVDATA_WIFI_TAG            = 26,       // AR.Drone 2.0\n    ARDRONE_NAVDATA_ZIMMU3000_TAG       = 27,       // AR.Drone 2.0\n    ARDRONE_NAVDATA_GPS_TAG             = 27,       // AR.Drone 2.4.1\n    ARDRONE_NAVDATA_CKS_TAG             = 0xFFFF\n};\n\n// Flight animation IDs\nenum ARDRONE_ANIMATION_ID {\n    ARDRONE_ANIM_PHI_M30_DEG             =  0,\n    ARDRONE_ANIM_PHI_30_DEG              =  1,\n    ARDRONE_ANIM_THETA_M30_DEG           =  2,\n    ARDRONE_ANIM_THETA_30_DEG            =  3,\n    ARDRONE_ANIM_THETA_20DEG_YAW_200DEG  =  4,\n    ARDRONE_ANIM_THETA_20DEG_YAW_M200DEG =  5,\n    ARDRONE_ANIM_TURNAROUND              =  6,\n    ARDRONE_ANIM_TURNAROUND_GODOWN       =  7,\n    ARDRONE_ANIM_YAW_SHAKE               =  8,\n    ARDRONE_ANIM_YAW_DANCE               =  9,\n    ARDRONE_ANIM_PHI_DANCE               = 10,\n    ARDRONE_ANIM_THETA_DANCE             = 11,\n    ARDRONE_ANIM_VZ_DANCE                = 12,\n    
ARDRONE_ANIM_WAVE                    = 13,\n    ARDRONE_ANIM_PHI_THETA_MIXED         = 14,\n    ARDRONE_ANIM_DOUBLE_PHI_THETA_MIXED  = 15,\n    ARDRONE_ANIM_FLIP_AHEAD              = 16,  // AR.Drone 2.0\n    ARDRONE_ANIM_FLIP_BEHIND             = 17,  // AR.Drone 2.0\n    ARDRONE_ANIM_FLIP_LEFT               = 18,  // AR.Drone 2.0\n    ARDRONE_ANIM_FLIP_RIGHT              = 19,  // AR.Drone 2.0\n    ARDRONE_NB_ANIM_MAYDAY               = 20\n};\n\n// LED animation IDs\nenum ARDRONE_LED_ANIMATION_ID {\n    ARDRONE_LED_ANIM_BLINK_GREEN_RED              =  0,\n    ARDRONE_LED_ANIM_BLINK_GREEN                  =  1,\n    ARDRONE_LED_ANIM_BLINK_RED                    =  2,\n    ARDRONE_LED_ANIM_BLINK_ORANGE                 =  3,\n    ARDRONE_LED_ANIM_SNAKE_GREEN_RED              =  4,\n    ARDRONE_LED_ANIM_FIRE                         =  5,\n    ARDRONE_LED_ANIM_STANDARD                     =  6,\n    ARDRONE_LED_ANIM_RED                          =  7,\n    ARDRONE_LED_ANIM_GREEN                        =  8,\n    ARDRONE_LED_ANIM_RED_SNAKE                    =  9,\n    ARDRONE_LED_ANIM_BLANK                        = 10,\n    ARDRONE_LED_ANIM_RIGHT_MISSILE                = 11,\n    ARDRONE_LED_ANIM_LEFT_MISSILE                 = 12,\n    ARDRONE_LED_ANIM_DOUBLE_MISSILE               = 13,\n    ARDRONE_LED_ANIM_FRONT_LEFT_GREEN_OTHERS_RED  = 14,\n    ARDRONE_LED_ANIM_FRONT_RIGHT_GREEN_OTHERS_RED = 15,\n    ARDRONE_LED_ANIM_REAR_RIGHT_GREEN_OTHERS_RED  = 16,\n    ARDRONE_LED_ANIM_REAR_LEFT_GREEN_OTHERS_RED   = 17,\n    ARDRONE_LED_ANIM_LEFT_GREEN_RIGHT_RED         = 18,\n    ARDRONE_LED_ANIM_LEFT_RED_RIGHT_GREEN         = 19,\n    ARDRONE_LED_ANIM_BLINK_STANDARD               = 20,\n    ARDRONE_NB_LED_ANIM_MAYDAY                    = 21\n};\n\n// TCP Class\nclass TCPSocket {\npublic:\n    TCPSocket();                            // Constructor\n    virtual ~TCPSocket();                   // Destructor\n    int  open(const char *addr, int port);  // Initialize\n    int  send2(void *data, size_t size);    // Send data\n    int  sendf(const char *str, ...);       // Send with format\n    int  receive(void *data, size_t size);  // Receive data\n    void close(void);                       // Finalize\nprivate:\n    SOCKET sock;                            // Socket\n    sockaddr_in server_addr, client_addr;   // Server/Client IP address\n};\n\n// UDP Class\nclass UDPSocket {\npublic:\n    UDPSocket();                            // Constructor\n    virtual ~UDPSocket();                   // Destructor\n    int  open(const char *addr, int port);  // Initialize\n    int  send2(void *data, size_t size);    // Send data\n    int  sendf(const char *str, ...);       // Send with format\n    int  receive(void *data, size_t size);  // Receive data\n    void close(void);                       // Finalize\nprivate:\n    SOCKET sock;                            // Socket\n    sockaddr_in server_addr, client_addr;   // Server/Client IP address\n};\n\n// Navdata\n#pragma pack(push, 1)\nstruct ARDRONE_NAVDATA {\n    // 3x3 matrix\n    struct matrix33_t { \n        float m11, m12, m13;\n        float m21, m22, m23;\n        float m31, m32, m33;\n    };\n\n    // 3x1 vector\n    union vector31_t {\n        float v[3];\n        struct {\n            float x;\n            float y;\n            float z;\n        };\n    };\n\n    // 2x1 vector\n    union vector21_t {\n        float v[2];\n        struct {\n            float x;\n            float y;\n        };\n    };\n\n    // Velocities\n    struct velocities_t {\n        
float x;\n        float y;\n        float z;\n    };\n\n    // Screen point\n    struct screen_point_t {\n        int x;\n        int y;\n    };\n\n    // Header\n    unsigned int header;\n    unsigned int ardrone_state;\n    unsigned int sequence;\n    unsigned int vision_defined;\n\n    // Demo\n    struct NAVDATA_DEMO {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   ctrl_state;\n        unsigned int   vbat_flying_percentage;\n        float          theta;\n        float          phi;\n        float          psi;\n        int            altitude;\n        float          vx;\n        float          vy;\n        float          vz;\n        unsigned int   num_frames;                // Don't use\n        matrix33_t     detection_camera_rot;      // Don't use\n        vector31_t     detection_camera_trans;    // Don't use\n        unsigned int   detection_tag_index;       // Don't use\n        unsigned int   detection_camera_type;     // Don't use\n        matrix33_t     drone_camera_rot;          // Don't use\n        vector31_t     drone_camera_trans;        // Don't use\n    } demo;\n\n    // Timestamp\n    struct NAVDATA_TIME {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   time;\n    } time;\n\n    // Raw measurements\n    struct NAVDATA_RAW_MEASURES {\n        unsigned short tag;\n        unsigned short size;\n        unsigned short raw_accs[3];         // filtered accelerometers\n        short          raw_gyros[3];        // filtered gyrometers\n        short          raw_gyros_110[2];    // gyrometers  x/y 110 deg/s\n        unsigned int   vbat_raw;            // battery voltage raw (mV)\n        unsigned short us_debut_echo;\n        unsigned short us_fin_echo;\n        unsigned short us_association_echo;\n        unsigned short us_distance_echo;\n        unsigned short us_courbe_temps;\n        unsigned short us_courbe_valeur;\n        unsigned short us_courbe_ref;\n        unsigned short flag_echo_ini;\n        //unsigned short frame_number;\n        unsigned short nb_echo;\n        unsigned int   sum_echo;\n        int            alt_temp_raw;\n        short          gradient;\n    } raw_measures;\n\n    // Physical measurements\n    struct NAVDATA_PHYS_MEASURES {\n        unsigned short tag;\n        unsigned short size;\n        float          accs_temp;\n        unsigned short gyro_temp;\n        float          phys_accs[3];\n        float          phys_gyros[3];\n        unsigned int   alim3V3;         // 3.3 volt alim       [LSB]\n        unsigned int   vrefEpson;       // ref volt Epson gyro [LSB]\n        unsigned int   vrefIDG;         // ref volt IDG gyro   [LSB]\n    } phys_measures;\n\n    // Gyros offsets\n    struct NAVDATA_GYROS_OFFSETS {\n        unsigned short tag;\n        unsigned short size;\n        float          offset_g[3];\n    } gyros_offsets;\n\n    // Euler angles\n    struct NAVDATA_EULER_ANGLES {\n        unsigned short tag;\n        unsigned short size;\n        float          theta_a;\n        float          phi_a;\n    } euler_angles;\n\n    // References\n    struct NAVDATA_REFERENCES {\n        unsigned short tag;\n        unsigned short size;\n        int            ref_theta;\n        int            ref_phi;\n        int            ref_theta_I;\n        int            ref_phi_I;\n        int            ref_pitch;\n        int            ref_roll;\n        int            ref_yaw;\n        int            ref_psi;\n        float          vx_ref;\n        float          
vy_ref;\n        float          theta_mod;\n        float          phi_mod;\n        float          k_v_x;\n        float          k_v_y;\n        unsigned int   k_mode;\n        float          ui_time;\n        float          ui_theta;\n        float          ui_phi;\n        float          ui_psi;\n        float          ui_psi_accuracy;\n        int            ui_seq;\n    } references;\n\n    // Trims\n    struct NAVDATA_TRIMS {\n        unsigned short tag;\n        unsigned short size;\n        float          angular_rates_trim_r;\n        float          euler_angles_trim_theta;\n        float          euler_angles_trim_phi;\n    } trims;\n\n    // RC references\n    struct NAVDATA_RC_REFERENCES {\n        unsigned short tag;\n        unsigned short size;\n        int            rc_ref_pitch;\n        int            rc_ref_roll;\n        int            rc_ref_yaw;\n        int            rc_ref_gaz;\n        int            rc_ref_ag;\n    } rc_references;\n\n    // PWM\n    struct NAVDATA_PWM {\n        unsigned short tag;\n        unsigned short size;\n        unsigned char  motor1;\n        unsigned char  motor2;\n        unsigned char  motor3;\n        unsigned char  motor4;\n        unsigned char  sat_motor1;\n        unsigned char  sat_motor2;\n        unsigned char  sat_motor3;\n        unsigned char  sat_motor4;\n        float          gaz_feed_forward;\n        float          gaz_altitude;\n        float          altitude_integral;\n        float          vz_ref;\n        int            u_pitch;\n        int            u_roll;\n        int            u_yaw;\n        float          yaw_u_I;\n        int            u_pitch_planif;\n        int            u_roll_planif;\n        int            u_yaw_planif;\n        float          u_gaz_planif;\n        unsigned short current_motor1;\n        unsigned short current_motor2;\n        unsigned short current_motor3;\n        unsigned short current_motor4;\n        float          altitude_prop;\n        float          altitude_der;\n    } pwm;\n\n    // Altitude\n    struct NAVDATA_ALTITUDE {\n        unsigned short tag;\n        unsigned short size;\n        int            altitude_vision;\n        float          altitude_vz;\n        int            altitude_ref;\n        int            altitude_raw;\n        float          obs_accZ;\n        float          obs_alt;\n        vector31_t     obs_x;\n        unsigned int   obs_state;\n        vector21_t     est_vb;\n        unsigned int   est_state;\n    } altitude;\n\n    // Vision (raw)\n    struct NAVDATA_VISION_RAW {\n        unsigned short tag;\n        unsigned short size;\n        float          vision_tx_raw;\n        float          vision_ty_raw;\n        float          vision_tz_raw;\n    } vision_raw;\n\n    // Vision (offset?)\n    struct NAVDATA_VISION_OF {\n        unsigned short tag;\n        unsigned short size;\n        float          of_dx[5];\n        float          of_dy[5];\n    } vision_of;\n\n    // Vision\n    struct NAVDATA_VISION {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   vision_state;\n        int            vision_misc;\n        float          vision_phi_trim;\n        float          vision_phi_ref_prop;\n        float          vision_theta_trim;\n        float          vision_theta_ref_prop;\n        int            new_raw_picture;\n        float          theta_capture;\n        float          phi_capture;\n        float          psi_capture;\n        int            altitude_capture;\n        unsigned int   
time_capture;    // time in TSECDEC format (see config.h)\n        velocities_t   body_v;\n        float          delta_phi;\n        float          delta_theta;\n        float          delta_psi;\n        unsigned int   gold_defined;\n        unsigned int   gold_reset;\n        float          gold_x;\n        float          gold_y;\n    } vision;\n\n    // Vision performances\n    struct NAVDATA_VISION_PERF {\n        unsigned short tag;\n        unsigned short size;\n        float          time_szo;\n        float          time_corners;\n        float          time_compute;\n        float          time_tracking;\n        float          time_trans;\n        float          time_update;\n        float          time_custom[20];\n    } vision_perf;\n\n    // Trackers\n    struct NAVDATA_TRACKERS_SEND {\n        unsigned short tag;\n        unsigned short size;\n        int            locked[30];\n        screen_point_t point[30];\n    } trackers_send;\n\n    // Vision detection\n    struct NAVDATA_VISION_DETECT {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   nb_detected;\n        unsigned int   type[4];\n        unsigned int   xc[4];\n        unsigned int   yc[4];\n        unsigned int   width[4];\n        unsigned int   height[4];\n        unsigned int   dist[4];\n        float          orientation_angle[4];\n        matrix33_t     rotation[4];\n        vector31_t     translation[4];\n        unsigned int   camera_source[4];\n    } vision_detect;\n\n    // Watchdog\n    struct NAVDATA_WATCHDOG {\n        unsigned short tag;\n        unsigned short size;\n        int            watchdog;\n    } watchdog;\n\n    // ADC data\n    struct NAVDATA_ADC_DATA_FRAME {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   version;\n        unsigned char  data_frame[32];\n    } adc_data_frame;\n\n    // Video stream\n    struct NAVDATA_VIDEO_STREAM {\n        unsigned short tag;\n        unsigned short size;\n        unsigned char  quant;               // quantizer reference used to encode frame [1:31]\n        unsigned int   frame_size;          // frame size (bytes)\n        unsigned int   frame_number;        // frame index\n        unsigned int   atcmd_ref_seq;       // atcmd ref sequence number\n        unsigned int   atcmd_mean_ref_gap;  // mean time between two consecutive atcmd_ref (ms)\n        float          atcmd_var_ref_gap;\n        unsigned int   atcmd_ref_quality;   // estimator of atcmd link quality\n\n        // drone2\n        unsigned int   out_bitrate;         // measured out throughput from the video tcp socket\n        unsigned int   desired_bitrate;     // last frame size generated by the video encoder\n        int            data1;\n        int            data2;\n        int            data3;\n        int            data4;\n        int            data5;\n        unsigned int   tcp_queue_level;\n        unsigned int   fifo_queue_level;\n    } video_stream;\n\n    // Games\n    struct NAVDATA_GAMES {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   double_tap_counter;\n        unsigned int   finish_line_counter;\n    } games;\n\n    // Pressure (raw)\n    struct NAVDATA_PRESSURE_RAW {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   up;\n        unsigned short ut;\n        unsigned int   temperature_meas;\n        unsigned int   pression_meas;\n    } pressure_raw;\n\n    // Magneto\n    struct NAVDATA_MAGNETO {\n        unsigned short tag;\n        
unsigned short size;\n        short          mx;\n        short          my;\n        short          mz;\n        vector31_t     magneto_raw;             // magneto in the body frame, in mG\n        vector31_t     magneto_rectified;\n        vector31_t     magneto_offset;\n        float          heading_unwrapped;\n        float          heading_gyro_unwrapped;\n        float          heading_fusion_unwrapped;\n        char           magneto_calibration_ok;\n        unsigned int   magneto_state;\n        float          magneto_radius;\n        float          error_mean;\n        float          error_var;\n        float          tmp1, tmp2;              // dummy ?\n    } magneto;\n\n    // Wind\n    struct NAVDATA_WIND {\n        unsigned short tag;\n        unsigned short size;\n        float          wind_speed;              // estimated wind speed [m/s]\n        float          wind_angle;              // estimated wind direction in North-East frame [rad] e.g. if wind_angle is pi/4, wind is from South-West to North-East\n        float          wind_compensation_theta;\n        float          wind_compensation_phi;\n        float          state_x1;\n        float          state_x2;\n        float          state_x3;\n        float          state_x4;\n        float          state_x5;\n        float          state_x6;\n        float          magneto_debug1;\n        float          magneto_debug2;\n        float          magneto_debug3;\n    } wind;\n\n    // Kalman filter\n    struct NAVDATA_KALMAN_PRESSURE {\n        unsigned short tag;\n        unsigned short size;\n        float          offset_pressure;\n        float          est_z;\n        float          est_zdot;\n        float          est_bias_PWM;\n        float          est_biais_pression;\n        float          offset_US;\n        float          prediction_US;\n        float          cov_alt;\n        float          cov_PWM;\n        float          cov_vitesse;\n        bool           bool_effet_sol;\n        float          somme_inno;\n        bool           flag_rejet_US;\n        float          u_multisinus;\n        float          gaz_altitude;\n        bool           flag_multisinus;\n        bool           flag_multisinus_debut;\n    } kalman_pressure;\n\n    // HD video stream\n    struct NAVDATA_HDVIDEO_STREAM {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   hdvideo_state;\n        unsigned int   storage_fifo_nb_packets;\n        unsigned int   storage_fifo_size;\n        unsigned int   usbkey_size;           // USB key in kbytes - 0 if no key present\n        unsigned int   usbkey_freespace;      // USB key free space in kbytes - 0 if no key present\n        unsigned int   frame_number;          // 'frame_number' PaVE field of the frame starting to be encoded for the HD stream\n        unsigned int   usbkey_remaining_time; // time in seconds\n    } hdvideo_stream;\n\n    // WiFi\n    struct NAVDATA_WIFI {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   link_quality;\n    } wifi;\n\n    // Zimmu 3000\n    struct NAVDATA_ZIMMU_3000 {\n        unsigned short tag;\n        unsigned short size;\n        int            vzimmuLSB;\n        float          vzfind;\n    } zimmu_3000;\n\n    // GPS (for AR.Drone 2.4.1, or later)\n    // From https://github.com/paparazzi/paparazzi/blob/master/sw/airborne/boards/ardrone/at_com.h\n    struct NAVDATA_GPS {\n        unsigned short tag;                  /*!< Navdata block ('option') identifier */\n        unsigned 
short size;                 /*!< set this to the size of this structure */\n        double         lat;                  /*!< Latitude */\n        double         lon;                  /*!< Longitude */\n        double         elevation;            /*!< Elevation */\n        double         hdop;                 /*!< hdop */\n        int            data_available;       /*!< When there is data available */\n        unsigned char  unk_0[8];\n        double         lat0;                 /*!< Latitude ??? */\n        double         lon0;                 /*!< Longitude ??? */\n        double         lat_fuse;             /*!< Latitude fused */\n        double         lon_fuse;             /*!< Longitude fused */\n        unsigned int   gps_state;            /*!< State of the GPS, still need to figure out */\n        unsigned char  unk_1[40];\n        double         vdop;                 /*!< vdop */\n        double         pdop;                 /*!< pdop */\n        float          speed;                /*!< speed */\n        unsigned int   last_frame_timestamp; /*!< Timestamp from the last frame */\n        float          degree;               /*!< Degree */\n        float          degree_mag;           /*!< Degree of the magnetic */\n        unsigned char  unk_2[16];\n        struct {\n            unsigned char sat;\n            unsigned char cn0;\n        } channels[12];\n        int             gps_plugged;         /*!< When the gps is plugged */\n        unsigned char   unk_3[108];\n        double          gps_time;            /*!< The gps time of week */\n        unsigned short  week;                /*!< The gps week */\n        unsigned char   gps_fix;             /*!< The gps fix */\n        unsigned char   num_sattelites;      /*!< Number of satellites */\n        unsigned char   unk_4[24];\n        double          ned_vel_c0;          /*!< NED velocity */\n        double          ned_vel_c1;          /*!< NED velocity */\n        double          ned_vel_c2;          /*!< NED velocity */\n        double          pos_accur_c0;        /*!< Position accuracy */\n        double          pos_accur_c1;        /*!< Position accuracy */\n        double          pos_accur_c2;        /*!< Position accuracy */\n        float           speed_acur;          /*!< Speed accuracy */\n        float           time_acur;           /*!< Time accuracy */\n        unsigned char   unk_5[72];\n        float           temprature;\n        float           pressure;\n    } gps;\n\n    // Check sum\n    struct NAVDATA_CKS {\n        unsigned short tag;\n        unsigned short size;\n        unsigned int   cks;\n    } cks;\n};\n#pragma pack(pop)\n\n// Configurations\nstruct ARDRONE_CONFIG {\n    struct CONFIG_GENERAL {\n        int   num_version_config;\n        int   num_version_mb;\n        char  num_version_soft[32];\n        char  drone_serial[32];\n        char  soft_build_date[32];\n        float motor1_soft;\n        float motor1_hard;\n        float motor1_supplier;\n        float motor2_soft;\n        float motor2_hard;\n        float motor2_supplier;\n        float motor3_soft;\n        float motor3_hard;\n        float motor3_supplier;\n        float motor4_soft;\n        float motor4_hard;\n        float motor4_supplier;\n        char  ardrone_name[32];\n        int   flying_time;\n        bool  navdata_demo;\n        int   com_watchdog;\n        bool  video_enable;\n        bool  vision_enable;\n        int   vbat_min;\n        int   localtime;\n        int   navdata_options;\n        float gps_soft;\n  
      float gps_hard;\n        char  localtime_zone[32];\n        char  timezone[32];\n        int   battery_type;\n    } general;\n\n    struct CONFIG_CONTROL {\n        float accs_offset[3];\n        float accs_gains[9];\n        float gyros_offset[3];\n        float gyros_gains[3];\n        float gyros110_offset[2];\n        float gyros110_gains[2];\n        float magneto_offset[3];\n        float magneto_radius;\n        float gyro_offset_thr_x;\n        float gyro_offset_thr_y;\n        float gyro_offset_thr_z;\n        int   pwm_ref_gyros;\n        int   osctun_value;\n        bool  osctun_test;\n        int   altitude_max;\n        int   altitude_min;\n        bool  outdoor;\n        bool  flight_without_shell;\n        bool  autonomous_flight;\n        int   flight_anim[2];\n        int   control_level;\n        float euler_angle_max;\n        float control_iphone_tilt;\n        float control_vz_max;\n        float control_yaw;\n        bool  manual_trim;\n        float indoor_euler_angle_max;\n        float indoor_control_vz_max;\n        float indoor_control_yaw;\n        float outdoor_euler_angle_max;\n        float outdoor_control_vz_max;\n        float outdoor_control_yaw;\n        int   flying_mode;\n        int   hovering_range;\n        int   flying_camera_mode[10];\n        bool  flying_camera_enable;\n    } control;\n\n    struct CONFIG_NETWORK {\n        char ssid_single_player[32];\n        char ssid_multi_player[32];\n        int  wifi_mode;\n        int  wifi_rate;\n        char owner_mac[18];\n    } network;\n\n    struct CONFIG_PIC {\n        int ultrasound_freq;\n        int ultrasound_watchdog;\n        int pic_version;\n    } pic;\n\n    struct CONFIG_VIDEO {\n        int  camif_fps;\n        int  camif_buffers;\n        int  num_trackers;\n        int  video_storage_space;\n        bool video_on_usb;\n        int  video_file_index;\n        int  bitrate;\n        int  bitrate_ctrl_mode;\n        int  bitrate_storage;\n        int  codec_fps;\n        int  video_codec;\n        int  video_slices;\n        int  video_live_socket;\n        int  max_bitrate;\n        int  video_channel;\n        int  exposure_mode[4];\n        int  saturation_mode;\n        int  whitebalance_mode[2];\n    } video;\n\n    struct CONFIG_LEDS {\n        int leds_anim[3];\n    } leds;\n\n    struct CONFIG_DETECT {\n        int enemy_colors;\n        int enemy_without_shell;\n        int groundstripe_colors;\n        int detect_type;\n        int detections_select_h;\n        int detections_select_v_hsync;\n        int detections_select_v;\n    } detect;\n\n    struct CONFIG_SYSLOG {\n        int output;\n        int max_size;\n        int nb_files;\n    } syslog;\n\n    struct CONFIG_CUSTOM {\n        char application_desc[64];\n        char profile_desc[64];\n        char session_desc[64];\n        char application_id[9];\n        char profile_id[9];\n        char session_id[9];\n    } custom;\n\n    struct CONFIG_USERBOX {\n        int userbox_cmd;\n    } userbox;\n\n    struct CONFIG_GPS {\n        float latitude;\n        float longitude;\n        float altitude;\n        float accuracy;\n        //int ephemeris_uploaded;\n    } gps;\n\n    struct FLIGHT_PLAN {\n        float default_validation_radius;\n        float default_validation_time;\n        int   max_distance_from_takeoff;\n        int   gcs_ip;\n        int   video_stop_delay;\n        bool  low_battery_go_home;\n        bool  automatic_heading;\n        int   com_lost_action_delay;\n        int   altitude_go_home;\n      
  char  mavlink_js_roll_left[3];\n        char  mavlink_js_roll_right[3];\n        char  mavlink_js_pitch_front[3];\n        char  mavlink_js_pitch_back[3];\n        char  mavlink_js_yaw_left[3];\n        char  mavlink_js_yaw_right[3];\n        char  mavlink_js_go_up[3];\n        char  mavlink_js_go_down[3];\n        char  mavlink_js_inc_gains[3];\n        char  mavlink_js_dec_gains[3];\n        char  mavlink_js_select[3];\n        char  mavlink_js_start[3];\n    } flightplan;\n\n    struct RESCUE {\n        int rescue;\n    } rescue;\n};\n\n// Version information\nstruct ARDRONE_VERSION {\n    int major;\n    int minor;\n    int revision;\n};\n\n// IplImage* <-> cv::Mat converter\nclass ARDRONE_IMAGE {\npublic:\n    ARDRONE_IMAGE(IplImage *img = NULL) {\n        image = img;\n    }\n    operator IplImage*() {\n        return image;\n    }\n    operator cv::Mat() {\n        if (!image) return cv::Mat();\n        return cv::cvarrToMat(image, true);\n    }\n\nprivate:\n    IplImage *image;\n};\n\n// AR.Drone class\nclass ARDrone {\npublic:\n    // Constructor / Destructor\n    ARDrone(const char *ardrone_addr = NULL);\n    virtual ~ARDrone();\n\n    // Initialize\n    virtual int open(const char *ardrone_addr = ARDRONE_DEFAULT_ADDR);\n\n    // Update\n    virtual int update(void);\n\n    // Finalize (Automatically called)\n    virtual void close(void);\n\n    // Get an image\n    virtual ARDRONE_IMAGE getImage(void);\n    virtual ARDrone& operator >> (cv::Mat &image);\n    virtual bool willGetNewImage(void);\n\n    // Get AR.Drone's firmware version\n    virtual int getVersion(int *major = NULL, int *minor = NULL, int *revision = NULL);\n\n    // Get sensor values\n    virtual double getRoll(void);       // Roll angle  [rad]\n    virtual double getPitch(void);      // Pitch angle [rad]\n    virtual double getYaw(void);        // Yaw angle   [rad]\n    virtual double getAltitude(void);   // Altitude    [m]\n    virtual double getVelocity(double *vx = NULL, double *vy = NULL, double *vz = NULL); // Velocity [m/s]\n    virtual int    getPosition(double *latitude = NULL, double *longitude = NULL, double *elevation = NULL); // GPS (only for AR.Drone 2.0)\n\n    // Battery charge [%]\n    virtual int getBatteryPercentage(void);\n\n    // Take off / Landing / Emergency\n    virtual void takeoff(void);\n    virtual void landing(void);\n    virtual void emergency(void);\n\n    // Move with velocity [m/s]\n    virtual void move(double vx, double vy, double vr);\n    virtual void move3D(double vx, double vy, double vz, double vr);\n\n    // Change camera channel\n    virtual void setCamera(int channel);\n\n    // Animation\n    virtual void setAnimation(int id, int duration = 0);             // Flight animation\n    virtual void setLED(int id, float freq = 0.0, int duration = 0); // LED animation\n\n    // Calibration\n    virtual void setFlatTrim(void);                 // Flat trim\n    virtual void setCalibration(int device = 0);    // Magnetometer calibration\n\n    // Others\n    virtual int  onGround(void);                    // Check on ground\n    virtual void setVideoRecord(bool activate);     // Video recording (only for AR.Drone 2.0)\n    virtual void setOutdoorMode(bool activate);     // Outdoor mode (experimental)\n\nprotected:\n    // IP address\n    char ip[16];\n\n    // Sequence number\n    unsigned long int seq;\n\n    // Camera image\n    IplImage *img;\n\n    // Sockets\n    UDPSocket sockCommand;\n    UDPSocket sockNavdata;\n    UDPSocket sockVideo;\n\n    // Version information\n   
 ARDRONE_VERSION version;\n\n    // Navigation data\n    ARDRONE_NAVDATA navdata;\n\n    // Configurations\n    ARDRONE_CONFIG config;\n\n    // Video\n    AVFormatContext *pFormatCtx;\n    AVCodecContext  *pCodecCtx;\n    AVFrame         *pFrame, *pFrameBGR;\n    uint8_t         *bufferBGR;\n    SwsContext      *pConvertCtx;\n    bool            newImage;\n\n    // Thread for AT command\n    pthread_t *threadCommand;\n    pthread_mutex_t *mutexCommand;\n    virtual void loopCommand(void);\n    static void *runCommand(void *args) {\n        reinterpret_cast<ARDrone*>(args)->loopCommand();\n        return NULL;\n    }\n\n    // Thread for Navdata\n    pthread_t *threadNavdata;\n    pthread_mutex_t *mutexNavdata;\n    virtual void loopNavdata(void);\n    static void *runNavdata(void *args) {\n        reinterpret_cast<ARDrone*>(args)->loopNavdata();\n        return NULL;\n    }\n\n    // Thread for Video\n    pthread_t *threadVideo;\n    pthread_mutex_t *mutexVideo;\n    virtual void loopVideo(void);\n    static void *runVideo(void *args) {\n        reinterpret_cast<ARDrone*>(args)->loopVideo();\n        return NULL;\n    }\n\n    // Initialize (internal)\n    virtual int initCommand(void);\n    virtual int initNavdata(void);\n    virtual int initVideo(void);\n\n    // Get information (internal)\n    virtual int getVersionInfo(void);\n    virtual int getNavdata(void);\n    virtual int getVideo(void);\n    virtual int getConfig(void);\n\n    // Send commands (internal)\n    virtual void resetWatchDog(void);\n    virtual void resetEmergency(void);\n\n    // Finalize (internal)\n    virtual void finalizeCommand(void);\n    virtual void finalizeNavdata(void);\n    virtual void finalizeVideo(void);\n};\n\n#ifdef _WIN32\n// --------------------------------------------------------------------------\n// CVDRONE_ERROR(Message)\n// Description  : Show an error message.\n// Return value : NONE\n// --------------------------------------------------------------------------\nCV_INLINE void CVDRONE_ERROR(const char *message, ...)\n{\n    va_list arg;\n    char str[256];\n\n    // Apply format\n    va_start(arg, message);\n    vsnprintf(str, 256, message, arg);\n    va_end(arg);\n\n    // Show the message\n    MessageBox(NULL, str, \"CVDRONE ERROR MESSAGE\", MB_OK|MB_ICONERROR|MB_TOPMOST|MB_SETFOREGROUND);\n}\n#else\n#define CVDRONE_ERROR printf\n#endif\n\n#endif"
  },
  {
    "path": "src/ardrone/command.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   command.cpp\n//! @brief  Sending AT commands\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n\n// --------------------------------------------------------------------------\n//! @brief   Initialize AT command.\n//! @return  Result of initialization\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::initCommand(void)\n{\n    // Open the IP address and port\n    if (!sockCommand.open(ip, ARDRONE_AT_PORT)) {\n        CVDRONE_ERROR(\"UDPSocket::open(port=%d) failed. (%s, %d)\\n\", ARDRONE_AT_PORT, __FILE__, __LINE__);\n        return 0;\n    }\n\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        // Send undocumented command\n        sockCommand.sendf(\"AT*PMODE=%d,%d\\r\", ++seq, 2);\n\n        // Send undocumented command\n        sockCommand.sendf(\"AT*MISC=%d,%d,%d,%d,%d\\r\", ++seq, 2, 20, 2000, 3000);\n\n        // Send flat trim\n        sockCommand.sendf(\"AT*FTRIM=%d,\\r\", ++seq);\n\n        // Set the configuration IDs\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"custom:session_id\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID);\n        msleep(500);\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"custom:profile_id\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_PROFILE_ID);\n        msleep(500);\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"custom:application_id\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_APPLOCATION_ID);\n        msleep(500);\n\n        // Set maximum velocity in Z-axis [mm/s]\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:control_vz_max\\\",\\\"%d\\\"\\r\", ++seq, 700);\n        msleep(100);\n\n        // Set maximum yaw [rad/s]\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, 
ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:control_yaw\\\",\\\"%f\\\"\\r\", ++seq, 99.0 * DEG_TO_RAD);\n        msleep(100);\n\n        // Set maximum euler angle [rad]\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:euler_angle_max\\\",\\\"%f\\\"\\r\", ++seq, 12.0 * DEG_TO_RAD);\n        msleep(100);\n\n        // Set maximum altitude [mm]\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:altitude_max\\\",\\\"%d\\\"\\r\", ++seq, 3000);\n        msleep(100);\n\n        // Bitrate control mode\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate_ctrl_mode\\\",\\\"%d\\\"\\r\", ++seq, 0);     // VBC_MODE_DISABLED\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate_ctrl_mode\\\",\\\"%d\\\"\\r\", ++seq, 1);   // VBC_MODE_DYNAMIC\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate_ctrl_mode\\\",\\\"%d\\\"\\r\", ++seq, 2);   // VBC_MANUAL\n        msleep(100);\n\n        // Bitrate\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate\\\",\\\"%d\\\"\\r\", ++seq, 1000);\n        msleep(100);\n\n        // Max bitrate\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:max_bitrate\\\",\\\"%d\\\"\\r\", ++seq, 4000);\n        msleep(100);\n\n        // Set video codec\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x81);   // H264_360P_CODEC\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x82); // MP4_360P_H264_720P_CODEC\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x83); // H264_720P_CODEC\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x88); // MP4_360P_H264_360P_CODEC\n        msleep(100);\n\n        // Set video channel to default\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_channel\\\",\\\"0\\\"\\r\", ++seq);\n        msleep(100);\n\n        // Disable USB recording\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_on_usb\\\",\\\"FALSE\\\"\\r\", ++seq);\n        msleep(100);\n    }\n    // AR.Drone 1.0\n    else {\n        // Send undocumented command\n        sockCommand.sendf(\"AT*PMODE=%d,%d\\r\", ++seq, 2);\n\n        // Send 
undocumented command\n        sockCommand.sendf(\"AT*MISC=%d,%d,%d,%d,%d\\r\", ++seq, 2, 20, 2000, 3000);\n\n        // Send flat trim\n        sockCommand.sendf(\"AT*FTRIM=%d,\\r\", ++seq);\n\n        // Set maximum velocity in Z-axis [mm/s]\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:control_vz_max\\\",\\\"%d\\\"\\r\", ++seq, 700);\n        msleep(100);\n\n        // Set maximum yaw [rad/s]\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:control_yaw\\\",\\\"%f\\\"\\r\", ++seq, 99.0 * DEG_TO_RAD);\n        msleep(100);\n\n        // Set maximum euler angle [rad]\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:euler_angle_max\\\",\\\"%f\\\"\\r\", ++seq, 12.0 * DEG_TO_RAD);\n        msleep(100);\n\n        // Set maximum altitude [mm]\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:altitude_max\\\",\\\"%d\\\"\\r\", ++seq, 3000);\n        msleep(100);\n\n        // Bitrate control mode\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate_ctrl_mode\\\",\\\"%d\\\"\\r\", ++seq, 0);     // VBC_MODE_DISABLED\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate_ctrl_mode\\\",\\\"%d\\\"\\r\", ++seq, 1);   // VBC_MODE_DYNAMIC\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate_ctrl_mode\\\",\\\"%d\\\"\\r\", ++seq, 2);   // VBC_MANUAL\n        msleep(100);\n\n        // Bitrate\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:bitrate\\\",\\\"%d\\\"\\r\", ++seq, 1000);\n        //msleep(100);\n\n        // Max bitrate\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:max_bitrate\\\",\\\"%d\\\"\\r\", ++seq, 4000);\n        //msleep(100);\n\n        // Set video codec\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x20);   // UVLC_CODEC\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x40); // P264_CODEC (not supported)\n        msleep(100);\n        \n        // Set video channel to default\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_channel\\\",\\\"0\\\"\\r\", ++seq);\n        msleep(100);\n    }\n\n    // Disable outdoor mode\n    setOutdoorMode(false);\n\n    // Create a mutex\n    mutexCommand = new pthread_mutex_t;\n    pthread_mutex_init(mutexCommand, NULL);\n\n    // Create a thread\n    threadCommand = new pthread_t;\n    if (pthread_create(threadCommand, NULL, runCommand, this) != 0) {\n        CVDRONE_ERROR(\"pthread_create() failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Thread function for AT command.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::loopCommand(void)\n{\n    while (1) {\n        // Reset Watch-Dog every 100ms\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*COMWDG=%d\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        pthread_testcancel();\n        msleep(100);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Take off the AR.Drone.\n//! 
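@details Minimal usage sketch (illustrative only; it assumes nothing beyond the ARDrone API declared in ardrone.h):\n//!          @code\n//!          ARDrone ardrone;\n//!          if (ardrone.open()) ardrone.takeoff();\n//!          @endcode\n//! 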
@return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::takeoff(void)\n{\n    // Get the state\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    int state = navdata.ardrone_state;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    // If AR.Drone is in emergency, reset it\n    if (state & ARDRONE_EMERGENCY_MASK) emergency();\n    else {\n        // Send take off\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*REF=%d,290718208\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Land the AR.Drone.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::landing(void)\n{\n    // Get the state\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    int state = navdata.ardrone_state;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    // If AR.Drone is in emergency, reset it\n    if (state & ARDRONE_EMERGENCY_MASK) emergency();\n    else {\n        // Send landing\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*REF=%d,290717696\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Emergency stop.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::emergency(void)\n{\n    // Send emergency\n    if (mutexCommand) pthread_mutex_lock(mutexCommand);\n    sockCommand.sendf(\"AT*REF=%d,290717952\\r\", ++seq);\n    if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Move the AR.Drone in the 2D plane.\n//! @param   vx X velocity [m/s]\n//! @param   vy Y velocity [m/s]\n//! @param   vr Angular velocity [rad/s]\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::move(double vx, double vy, double vr)\n{\n    move3D(vx, vy, 0.0, vr);\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Move the AR.Drone in 3D space.\n//! @param   vx X velocity [m/s]\n//! @param   vy Y velocity [m/s]\n//! @param   vz Z velocity [m/s]\n//! @param   vr Angular velocity [rad/s]\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::move3D(double vx, double vy, double vz, double vr)\n{\n    // AR.Drone is flying\n    if (!onGround()) {\n        // Command velocities\n        float v[4] = {-0.2f * (float)vy, -0.2f * (float)vx, 1.0f * (float)vz, -0.5f * (float)vr};\n        int mode = (fabs(v[0]) > 0.0 || fabs(v[1]) > 0.0);\n\n        // Normalization (-1.0 to +1.0)\n        for (int i = 0; i < 4; i++) {\n            if (fabs(v[i]) > 1.0) v[i] /= fabs(v[i]);\n        }\n\n        // Send a command\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*PCMD=%d,%d,%d,%d,%d,%d\\r\", ++seq, mode, *(int*)(&v[0]), *(int*)(&v[1]), *(int*)(&v[2]), *(int*)(&v[3]));\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Change the camera channel.\n//! 
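@details Illustrative sketch: the channel is taken modulo the number of channels in the body below, so on AR.Drone 2.0 ardrone.setCamera(0) selects the front camera and ardrone.setCamera(1) the vertical (bottom) camera.\n//! 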
@param   channel Camera channel \n//! @note    AR.Drone 1.0 supports [0, 1, 2, 3]. \n//!          AR.Drone 2.0 supports [0, 1].\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::setCamera(int channel)\n{\n    // Enable mutex lock\n    if (mutexCommand) pthread_mutex_lock(mutexCommand);\n\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_channel\\\",\\\"%d\\\"\\r\", ++seq, channel % 2);\n    }\n    // AR.Drone 1.0\n    else {\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_channel\\\",\\\"%d\\\"\\r\", ++seq, channel % 4);\n    }\n\n    // Disable mutex lock\n    if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n\n    msleep(100);\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Set a reference of the horizontal plane.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::setFlatTrim(void)\n{\n    if (onGround()) {\n        // Send flat trim command\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*FTRIM=%d\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Calibrate AR.Drone's magnetometer.\n//! @param   device Device ID\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::setCalibration(int device)\n{\n    if (!onGround()) {\n        // Send calibration command\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*CALIB=%d,%d\\r\", ++seq, device);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Run specified flight animation.\n//! @param   id Flight animation ID\n//! @param   timeout Timeout [ms]\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::setAnimation(int id, int timeout)\n{\n    // ID\n    if (version.major == ARDRONE_VERSION_2) id = abs(id % ARDRONE_NB_ANIM_MAYDAY);\n    else                                    id = abs(id % ARDRONE_ANIM_FLIP_AHEAD);\n\n    // Default timeout\n    if (timeout < 1) {\n        const int default_timeout[ARDRONE_NB_ANIM_MAYDAY] = {1000, 1000, 1000, 1000, 1000, 1000, 5000, 5000, 2000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 15, 15, 15, 15};\n        timeout = default_timeout[id];\n    }\n\n    // Send a command\n    if (mutexCommand) pthread_mutex_lock(mutexCommand);\n    sockCommand.sendf(\"AT*ANIM=%d,%d,%d\\r\", ++seq, id, timeout);\n    if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Run specified LED animation.\n//! @param   id LED animation ID\n//! @param   freq Frequency [Hz],\n//! @param   duration Duration [s]\n//! 
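@note    Illustrative call, using the enum from ardrone.h: ardrone.setLED(ARDRONE_LED_ANIM_BLINK_GREEN, 2.0, 3) should blink the LEDs green at 2 Hz for about 3 seconds.\n//! 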
@return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::setLED(int id, float freq, int duration)\n{\n    // ID\n    id = abs(id % ARDRONE_NB_LED_ANIM_MAYDAY);\n\n    // Default frequency\n    if (freq <= 0.0) {\n        float default_freq[ARDRONE_NB_LED_ANIM_MAYDAY] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};\n        freq = default_freq[id];\n    }\n\n    // Default duration\n    if (duration <= 0) {\n        int default_duration[ARDRONE_NB_LED_ANIM_MAYDAY] = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};\n        duration = default_duration[id];\n    }\n\n    // Send a command\n    if (mutexCommand) pthread_mutex_lock(mutexCommand);\n    sockCommand.sendf(\"AT*LED=%d,%d,%d,%d\\r\", ++seq, id, *(int*)(&freq), duration);\n    if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Start or stop recording video.\n//! @param   activate Enable / Disable flag\n//! @note    This function is only for AR.Drone 2.0. \n//!          You should attach a USB key with > 100MB of free space to your drone.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::setVideoRecord(bool activate)\n{\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        // Finalize video\n        finalizeVideo();\n\n        // Enable/Disable video recording\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        if (activate) sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_on_usb\\\",\\\"TRUE\\\"\\r\",  ++seq);\n        else          sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_on_usb\\\",\\\"FALSE\\\"\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        msleep(100);\n\n        // Output video with MP4_360P_H264_720P_CODEC / H264_360P_CODEC\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        if (activate) sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x82);\n        else          sockCommand.sendf(\"AT*CONFIG=%d,\\\"video:video_codec\\\",\\\"%d\\\"\\r\", ++seq, 0x81);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        msleep(100);\n\n        // Initialize video\n        initVideo();\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Set outdoor mode.\n//! @param   activate Enable / Disable flag\n//! 
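@note    Illustrative: ardrone.setOutdoorMode(true) sets control:outdoor and control:flight_without_shell to TRUE (see the body below); call it before takeoff when flying outside without the indoor hull.\n//! 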
@return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::setOutdoorMode(bool activate)\n{\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        // Enable/Disable outdoor mode\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        if (activate) sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:outdoor\\\",\\\"TRUE\\\"\\r\",  ++seq);\n        else          sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:outdoor\\\",\\\"FALSE\\\"\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        msleep(100);\n\n        // Without/With shell\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        if (activate) sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:flight_without_shell\\\",\\\"TRUE\\\"\\r\",  ++seq);\n        else          sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:flight_without_shell\\\",\\\"FALSE\\\"\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        msleep(100);\n    }\n    // AR.Drone 1.0\n    else {\n        // Enable/Disable outdoor mode\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        if (activate) sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:outdoor\\\",\\\"TRUE\\\"\\r\",  ++seq);\n        else          sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:outdoor\\\",\\\"FALSE\\\"\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        msleep(100);\n\n        // Without/With shell\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        if (activate) sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:flight_without_shell\\\",\\\"TRUE\\\"\\r\",  ++seq);\n        else          sockCommand.sendf(\"AT*CONFIG=%d,\\\"control:flight_without_shell\\\",\\\"FALSE\\\"\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        msleep(100);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Reset the communication watchdog.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::resetWatchDog(void)\n{\n    // Get the state\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    int state = navdata.ardrone_state;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    // If AR.Drone is in Watch-Dog, reset it\n    if (state & ARDRONE_COM_WATCHDOG_MASK) {\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*COMWDG=%d\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Disable the emergency lock.\n//! 
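@note    Sends the same AT*REF toggle as emergency(), but only when ARDRONE_EMERGENCY_MASK is set in the navdata state.\n//! 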
\n\n// --------------------------------------------------------------------------\n//! @brief   Disable the emergency lock.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::resetEmergency(void)\n{\n    // Get the state\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    int state = navdata.ardrone_state;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    // If AR.Drone is in emergency, reset it\n    // 290717952 = 0x11540100, the AT*REF base value with the emergency bit (bit 8) set\n    if (state & ARDRONE_EMERGENCY_MASK) {\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*REF=%d,290717952\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Finalize the AT command function.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::finalizeCommand(void)\n{\n    // Destroy the thread\n    if (threadCommand) {\n        pthread_cancel(*threadCommand);\n        pthread_join(*threadCommand, NULL);\n        delete threadCommand;\n        threadCommand = NULL;\n    }\n\n    // Delete the mutex\n    if (mutexCommand) {\n        pthread_mutex_destroy(mutexCommand);\n        delete mutexCommand;\n        mutexCommand = NULL;\n    }\n\n    // Close the socket\n    sockCommand.close();\n}
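\n\n// --------------------------------------------------------------------------\n// Usage sketch (illustration only, not part of the library): recovering from\n// an emergency cut-off before trying another takeoff. Assumes an\n// already-connected ARDrone instance named `ardrone`; takeoff() is assumed to\n// be defined elsewhere in this library.\n//\n//   ardrone.resetEmergency();   // clear the emergency flag\n//   ardrone.resetWatchDog();    // make sure the command link is alive\n//   ardrone.takeoff();\n// --------------------------------------------------------------------------"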
  },
  {
    "path": "src/ardrone/config.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   config.cpp\n//! @brief  A source file of AR.Drone class\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n\n// --------------------------------------------------------------------------\n//! @brief   Parse a configuration string.\n//! @param   str Configuration string\n//! @param   config Configuration struct\n//! @return  None\n// --------------------------------------------------------------------------\nvoid parse(const char *str, ARDRONE_CONFIG *config)\n{\n    // Split key and value\n    char category[256] = {'\\0'}, key[256] = {'\\0'}, val[256] = {'\\0'};\n    sscanf(str, \"%[^:]:%s = %[^\\n]\", category, key, val);\n    //printf(\"category = %s, key = %s, val = %s\\n\", category, key, val);\n\n    // Parse the values\n    if (!(strcmp(category, \"general\"))) {\n        if      (!strcmp(key, \"num_version_config\")) sscanf(val, \"%d\", &(config->general.num_version_config));\n        else if (!strcmp(key, \"num_version_mb\"))     sscanf(val, \"%d\", &(config->general.num_version_mb));\n        else if (!strcmp(key, \"num_version_soft\"))   strncpy(config->general.num_version_soft, val, 32);\n        else if (!strcmp(key, \"drone_serial\"))       strncpy(config->general.drone_serial, val, 32);\n        else if (!strcmp(key, \"soft_build_date\"))    strncpy(config->general.soft_build_date, val, 32);\n        else if (!strcmp(key, \"motor1_soft\"))        sscanf(val, \"%f\", &(config->general.motor1_soft));\n        else if (!strcmp(key, \"motor1_hard\"))        sscanf(val, \"%f\", &(config->general.motor1_hard));\n        else if (!strcmp(key, \"motor1_supplier\"))    sscanf(val, \"%f\", &(config->general.motor1_supplier));\n        else if (!strcmp(key, \"motor2_soft\"))        sscanf(val, \"%f\", &(config->general.motor2_soft));\n        else if (!strcmp(key, \"motor2_hard\"))        sscanf(val, \"%f\", &(config->general.motor2_hard));\n        else if (!strcmp(key, \"motor2_supplier\"))    sscanf(val, \"%f\", &(config->general.motor2_supplier));\n        else if (!strcmp(key, \"motor3_soft\"))        sscanf(val, \"%f\", &(config->general.motor3_soft));\n        else if (!strcmp(key, \"motor3_hard\"))        sscanf(val, \"%f\", &(config->general.motor3_hard));\n        else if (!strcmp(key, \"motor3_supplier\"))    sscanf(val, \"%f\", &(config->general.motor3_supplier));\n        else if (!strcmp(key, \"motor4_soft\"))        sscanf(val, \"%f\", 
&(config->general.motor4_soft));\n        else if (!strcmp(key, \"motor4_hard\"))        sscanf(val, \"%f\", &(config->general.motor4_hard));\n        else if (!strcmp(key, \"motor4_supplier\"))    sscanf(val, \"%f\", &(config->general.motor4_supplier));\n        else if (!strcmp(key, \"ardrone_name\"))       strncpy(config->general.ardrone_name, val, 32);\n        else if (!strcmp(key, \"flying_time\"))        sscanf(val, \"%d\", &(config->general.flying_time));\n        else if (!strcmp(key, \"navdata_demo\"))       config->general.navdata_demo = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"com_watchdog\"))       sscanf(val, \"%d\", &(config->general.com_watchdog));\n        else if (!strcmp(key, \"video_enable\"))       config->general.video_enable = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"vision_enable\"))      config->general.vision_enable = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"vbat_min\"))           sscanf(val, \"%d\", &(config->general.vbat_min));\n        else if (!strcmp(key, \"localtime\"))          sscanf(val, \"%d\", &(config->general.localtime));\n        else if (!strcmp(key, \"navdata_options\"))    sscanf(val, \"%d\", &(config->general.navdata_options));\n        else if (!strcmp(key, \"gps_soft\"))           sscanf(val, \"%f\", &(config->general.gps_soft));\n        else if (!strcmp(key, \"gps_hard\"))           sscanf(val, \"%f\", &(config->general.gps_hard));\n        else if (!strcmp(key, \"localtime_zone\"))     strncpy(config->general.localtime_zone, val, 32);\n        else if (!strcmp(key, \"timezone\"))           strncpy(config->general.timezone, val, 32);\n        else if (!strcmp(key, \"battery_type\"))       sscanf(val, \"%d\", &(config->general.battery_type));\n    }\n    else if (!(strcmp(category, \"control\"))) {\n        if      (!strcmp(key, \"accs_offset\"))             sscanf(val, \"{ %f %f %f }\", &(config->control.accs_offset[0]), &(config->control.accs_offset[1]), &(config->control.accs_offset[2]));\n        else if (!strcmp(key, \"accs_gains\"))              sscanf(val, \"{ %f %f %f %f %f %f %f %f %f }\", &(config->control.accs_gains[0]), &(config->control.accs_gains[1]), &(config->control.accs_gains[2]), &(config->control.accs_gains[3]), &(config->control.accs_gains[4]), &(config->control.accs_gains[5]), &(config->control.accs_gains[6]), &(config->control.accs_gains[7]), &(config->control.accs_gains[8]));\n        else if (!strcmp(key, \"gyros_offset\"))            sscanf(val, \"{ %f %f %f }\", &(config->control.gyros_offset[0]), &(config->control.gyros_offset[1]), &(config->control.gyros_offset[2]));\n        else if (!strcmp(key, \"gyros_gains\"))             sscanf(val, \"{ %f %f %f }\", &(config->control.gyros_gains[0]), &(config->control.gyros_gains[1]), &(config->control.gyros_gains[2]));\n        else if (!strcmp(key, \"gyros110_offset\"))         sscanf(val, \"{ %f %f }\", &(config->control.gyros110_offset[0]), &(config->control.gyros110_offset[1]));\n        else if (!strcmp(key, \"gyros110_gains\"))          sscanf(val, \"{ %f %f }\", &(config->control.gyros110_gains[0]), &(config->control.gyros110_gains[1]));\n        else if (!strcmp(key, \"magneto_offset\"))          sscanf(val, \"{ %f %f %f }\", &(config->control.magneto_offset[0]), &(config->control.magneto_offset[1]), &(config->control.magneto_offset[2]));\n        else if (!strcmp(key, \"magneto_radius\"))          sscanf(val, \"%f\", &(config->control.magneto_radius));\n        
else if (!strcmp(key, \"gyro_offset_thr_x\"))       sscanf(val, \"%f\", &(config->control.gyro_offset_thr_x));\n        else if (!strcmp(key, \"gyro_offset_thr_y\"))       sscanf(val, \"%f\", &(config->control.gyro_offset_thr_y));\n        else if (!strcmp(key, \"gyro_offset_thr_z\"))       sscanf(val, \"%f\", &(config->control.gyro_offset_thr_z));\n        else if (!strcmp(key, \"pwm_ref_gyros\"))           sscanf(val, \"%d\", &(config->control.pwm_ref_gyros));\n        else if (!strcmp(key, \"osctun_value\"))            sscanf(val, \"%d\", &(config->control.osctun_value));\n        else if (!strcmp(key, \"osctun_test\"))             config->control.osctun_test = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"altitude_max\"))            sscanf(val, \"%d\", &(config->control.altitude_max));\n        else if (!strcmp(key, \"altitude_min\"))            sscanf(val, \"%d\", &(config->control.altitude_min));\n        else if (!strcmp(key, \"outdoor\"))                 config->control.outdoor = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"flight_without_shell\"))    config->control.flight_without_shell = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"autonomous_flight\"))       config->control.autonomous_flight = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"flight_anim\"))             sscanf(val, \"%d,%d\", &(config->control.flight_anim[0]), &(config->control.flight_anim[1]));\n        else if (!strcmp(key, \"control_level\"))           sscanf(val, \"%d\", &(config->control.control_level));\n        else if (!strcmp(key, \"euler_angle_max\"))         sscanf(val, \"%f\", &(config->control.euler_angle_max));\n        else if (!strcmp(key, \"control_iphone_tilt\"))     sscanf(val, \"%f\", &(config->control.control_iphone_tilt));\n        else if (!strcmp(key, \"control_vz_max\"))          sscanf(val, \"%f\", &(config->control.control_vz_max));\n        else if (!strcmp(key, \"control_yaw\"))             sscanf(val, \"%f\", &(config->control.control_yaw));\n        else if (!strcmp(key, \"manual_trim\"))             config->control.manual_trim = (!strcmp(val, \"TRUE\")) ? 
true : false;\n        else if (!strcmp(key, \"indoor_euler_angle_max\"))  sscanf(val, \"%f\", &(config->control.indoor_euler_angle_max));\n        else if (!strcmp(key, \"indoor_control_vz_max\"))   sscanf(val, \"%f\", &(config->control.indoor_control_vz_max));\n        else if (!strcmp(key, \"indoor_control_yaw\"))      sscanf(val, \"%f\", &(config->control.indoor_control_yaw));\n        else if (!strcmp(key, \"outdoor_euler_angle_max\")) sscanf(val, \"%f\", &(config->control.outdoor_euler_angle_max));\n        else if (!strcmp(key, \"outdoor_control_vz_max\"))  sscanf(val, \"%f\", &(config->control.outdoor_control_vz_max));\n        else if (!strcmp(key, \"outdoor_control_yaw\"))     sscanf(val, \"%f\", &(config->control.outdoor_control_yaw));\n        else if (!strcmp(key, \"flying_mode\"))             sscanf(val, \"%d\", &(config->control.flying_mode));\n        else if (!strcmp(key, \"hovering_range\"))          sscanf(val, \"%d\", &(config->control.hovering_range));\n        else if (!strcmp(key, \"flying_camera_mode\"))      sscanf(val, \"%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\", &(config->control.flying_camera_mode[0]), &(config->control.flying_camera_mode[1]), &(config->control.flying_camera_mode[2]), &(config->control.flying_camera_mode[3]), &(config->control.flying_camera_mode[4]), &(config->control.flying_camera_mode[5]), &(config->control.flying_camera_mode[6]), &(config->control.flying_camera_mode[7]), &(config->control.flying_camera_mode[8]), &(config->control.flying_camera_mode[9]));\n        else if (!strcmp(key, \"flying_camera_enable\"))    config->control.flying_camera_enable = (!strcmp(val, \"TRUE\")) ? true : false;\n    }\n    else if (!(strcmp(category, \"network\"))) {\n        if      (!strcmp(key, \"ssid_single_player\")) strncpy(config->network.ssid_single_player, val, 32);\n        else if (!strcmp(key, \"ssid_multi_player\"))  strncpy(config->network.ssid_multi_player, val, 32);\n        else if (!strcmp(key, \"wifi_mode\"))          sscanf(val, \"%d\", &(config->network.wifi_mode));\n        else if (!strcmp(key, \"wifi_rate\"))          sscanf(val, \"%d\", &(config->network.wifi_rate));\n        else if (!strcmp(key, \"owner_mac\"))          strncpy(config->network.owner_mac, val, 18);\n    }\n    else if (!(strcmp(category, \"pic\"))) {\n        if      (!strcmp(key, \"ultrasound_freq\"))     sscanf(val, \"%d\", &(config->pic.ultrasound_freq));\n        else if (!strcmp(key, \"ultrasound_watchdog\")) sscanf(val, \"%d\", &(config->pic.ultrasound_watchdog));\n        else if (!strcmp(key, \"pic_version\"))         sscanf(val, \"%d\", &(config->pic.pic_version));\n    }\n    else if (!(strcmp(category, \"video\"))) {\n        if      (!strcmp(key, \"camif_fps\"))           sscanf(val, \"%d\", &(config->video.camif_fps));\n        else if (!strcmp(key, \"camif_buffers\"))       sscanf(val, \"%d\", &(config->video.camif_buffers));\n        else if (!strcmp(key, \"num_trackers\"))        sscanf(val, \"%d\", &(config->video.num_trackers));\n        else if (!strcmp(key, \"video_storage_space\")) sscanf(val, \"%d\", &(config->video.video_storage_space));\n        else if (!strcmp(key, \"video_on_usb\"))        config->video.video_on_usb = (!strcmp(val, \"TRUE\")) ? 
true : false;\n        else if (!strcmp(key, \"video_file_index\"))    sscanf(val, \"%d\", &(config->video.video_file_index));\n        else if (!strcmp(key, \"bitrate\"))             sscanf(val, \"%d\", &(config->video.bitrate));\n        else if (!strcmp(key, \"bitrate_ctrl_mode\"))   sscanf(val, \"%d\", &(config->video.bitrate_ctrl_mode));\n        else if (!strcmp(key, \"bitrate_storage\"))     sscanf(val, \"%d\", &(config->video.bitrate_storage));\n        else if (!strcmp(key, \"codec_fps\"))           sscanf(val, \"%d\", &(config->video.codec_fps));\n        else if (!strcmp(key, \"video_codec\"))         sscanf(val, \"%d\", &(config->video.video_codec));\n        else if (!strcmp(key, \"video_slices\"))        sscanf(val, \"%d\", &(config->video.video_slices));\n        else if (!strcmp(key, \"video_live_socket\"))   sscanf(val, \"%d\", &(config->video.video_live_socket));\n        else if (!strcmp(key, \"max_bitrate\"))         sscanf(val, \"%d\", &(config->video.max_bitrate));\n        else if (!strcmp(key, \"video_channel\"))       sscanf(val, \"%d\", &(config->video.video_channel));\n        else if (!strcmp(key, \"exposure_mode\"))       sscanf(val, \"%d,%d,%d,%d\", &(config->video.exposure_mode[0]), &(config->video.exposure_mode[1]), &(config->video.exposure_mode[2]), &(config->video.exposure_mode[3]));\n        else if (!strcmp(key, \"saturation_mode\"))     sscanf(val, \"%d\", &(config->video.saturation_mode));\n        else if (!strcmp(key, \"whitebalance_mode\"))   sscanf(val, \"%d,%d\", &(config->video.whitebalance_mode[0]), &(config->video.whitebalance_mode[1]));\n    }\n    else if (!(strcmp(category, \"leds\"))) {\n        if (!strcmp(key, \"leds_anim\")) sscanf(val, \"%d,%d,%d\", &(config->leds.leds_anim[0]), &(config->leds.leds_anim[1]), &(config->leds.leds_anim[2]));\n    }\n    else if (!(strcmp(category, \"detect\"))) {\n        if      (!strcmp(key, \"enemy_colors\"))              sscanf(val, \"%d\", &(config->detect.enemy_colors));\n        else if (!strcmp(key, \"enemy_without_shell\"))       sscanf(val, \"%d\", &(config->detect.enemy_without_shell));\n        else if (!strcmp(key, \"groundstripe_colors\"))       sscanf(val, \"%d\", &(config->detect.groundstripe_colors));\n        else if (!strcmp(key, \"detect_type\"))               sscanf(val, \"%d\", &(config->detect.detect_type));\n        else if (!strcmp(key, \"detections_select_h\"))       sscanf(val, \"%d\", &(config->detect.detections_select_h));\n        else if (!strcmp(key, \"detections_select_v_hsync\")) sscanf(val, \"%d\", &(config->detect.detections_select_v_hsync));\n        else if (!strcmp(key, \"detections_select_v\"))       sscanf(val, \"%d\", &(config->detect.detections_select_v));\n    }\n    else if (!(strcmp(category, \"syslog\"))) {\n        if      (!strcmp(key, \"output\"))   sscanf(val, \"%d\", &(config->syslog.output));\n        else if (!strcmp(key, \"max_size\")) sscanf(val, \"%d\", &(config->syslog.max_size));\n        else if (!strcmp(key, \"nb_files\")) sscanf(val, \"%d\", &(config->syslog.nb_files));\n    }\n    else if (!(strcmp(category, \"custom\"))) {\n        if      (!strcmp(key, \"application_desc\")) strncpy(config->custom.application_desc, val, 64);\n        else if (!strcmp(key, \"profile_desc\"))     strncpy(config->custom.profile_desc, val, 64);\n        else if (!strcmp(key, \"session_desc\"))     strncpy(config->custom.session_desc, val, 64);\n        else if (!strcmp(key, \"application_id\"))   strncpy(config->custom.application_id, val, 8);\n        else if 
(!strcmp(key, \"profile_id\"))       strncpy(config->custom.profile_id, val, 8);\n        else if (!strcmp(key, \"session_id\"))       strncpy(config->custom.session_id, val, 8);\n    }\n    else if (!(strcmp(category, \"userbox\"))) {\n        if (!strcmp(key, \"userbox_cmd\")) sscanf(val, \"%d\", &(config->userbox.userbox_cmd));\n    }\n    else if (!(strcmp(category, \"gps\"))) {\n        if      (!strcmp(key, \"latitude\"))  sscanf(val, \"%f\", &(config->gps.latitude));\n        else if (!strcmp(key, \"longitude\")) sscanf(val, \"%f\", &(config->gps.longitude));\n        else if (!strcmp(key, \"altitude\"))  sscanf(val, \"%f\", &(config->gps.altitude));\n        else if (!strcmp(key, \"accuracy\"))  sscanf(val, \"%f\", &(config->gps.accuracy));\n    }\n    else if (!(strcmp(category, \"flightplan\"))) {\n        if      (!strcmp(key, \"default_validation_radius\")) sscanf(val, \"%f\", &(config->flightplan.default_validation_radius));\n        else if (!strcmp(key, \"default_validation_time\"))   sscanf(val, \"%f\", &(config->flightplan.default_validation_time));\n        else if (!strcmp(key, \"max_distance_from_takeoff\")) sscanf(val, \"%d\", &(config->flightplan.max_distance_from_takeoff));\n        else if (!strcmp(key, \"gcs_ip\"))                    sscanf(val, \"%d\", &(config->flightplan.gcs_ip));\n        else if (!strcmp(key, \"video_stop_delay\"))          sscanf(val, \"%d\", &(config->flightplan.video_stop_delay));\n        else if (!strcmp(key, \"low_battery_go_home\"))       config->flightplan.low_battery_go_home = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"automatic_heading\"))         config->flightplan.automatic_heading = (!strcmp(val, \"TRUE\")) ? true : false;\n        else if (!strcmp(key, \"com_lost_action_delay\"))     sscanf(val, \"%d\", &(config->flightplan.com_lost_action_delay));\n        else if (!strcmp(key, \"altitude_go_home\"))          sscanf(val, \"%d\", &(config->flightplan.altitude_go_home));\n        else if (!strcmp(key, \"mavlink_js_roll_left\"))      strncpy(config->flightplan.mavlink_js_roll_left, val, 3);\n        else if (!strcmp(key, \"mavlink_js_roll_right\"))     strncpy(config->flightplan.mavlink_js_roll_right, val, 3);\n        else if (!strcmp(key, \"mavlink_js_pitch_front\"))    strncpy(config->flightplan.mavlink_js_pitch_front, val, 3);\n        else if (!strcmp(key, \"mavlink_js_pitch_back\"))     strncpy(config->flightplan.mavlink_js_pitch_back, val, 3);\n        else if (!strcmp(key, \"mavlink_js_yaw_left\"))       strncpy(config->flightplan.mavlink_js_yaw_left, val, 3);\n        else if (!strcmp(key, \"mavlink_js_yaw_right\"))      strncpy(config->flightplan.mavlink_js_yaw_right, val, 3);\n        else if (!strcmp(key, \"mavlink_js_go_up\"))          strncpy(config->flightplan.mavlink_js_go_up, val, 3);\n        else if (!strcmp(key, \"mavlink_js_go_down\"))        strncpy(config->flightplan.mavlink_js_go_down, val, 3);\n        else if (!strcmp(key, \"mavlink_js_inc_gains\"))      strncpy(config->flightplan.mavlink_js_inc_gains, val, 3);\n        else if (!strcmp(key, \"mavlink_js_dec_gains\"))      strncpy(config->flightplan.mavlink_js_dec_gains, val, 3);\n        else if (!strcmp(key, \"mavlink_js_select\"))         strncpy(config->flightplan.mavlink_js_select, val, 3);\n        else if (!strcmp(key, \"mavlink_js_start\"))          strncpy(config->flightplan.mavlink_js_start, val, 3);\n    }\n    else if (!(strcmp(category, \"rescue\"))) {\n        if (!strcmp(key, \"rescue\")) sscanf(val, \"%d\", 
&(config->rescue.rescue));\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get current configurations of AR.Drone.\n//! @return  Result of this function\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::getConfig(void)\n{\n    // Open the IP address and port\n    TCPSocket sockConfig;\n    if (!sockConfig.open(ip, ARDRONE_CONTROL_PORT)) {\n        CVDRONE_ERROR(\"TCPSocket::open(port=%d) failed. (%s, %d)\\n\", ARDRONE_CONTROL_PORT, __FILE__, __LINE__);\n        return 0;\n    }\n\n    // Send requests\n    UDPSocket tmpCommand;\n    tmpCommand.open(ip, ARDRONE_AT_PORT);\n    tmpCommand.sendf(\"AT*CTRL=%d,5,0\\r\", ++seq);\n    tmpCommand.sendf(\"AT*CTRL=%d,4,0\\r\", ++seq);\n    msleep(500);\n    tmpCommand.close();\n\n    // Receive data\n    char buf[10000] = {'\\0'};\n    int size = sockConfig.receive((void*)&buf, sizeof(buf));\n\n    // Received something\n    if (size > 0) {\n        #if 0\n        // Saving config.ini\n        FILE *file = fopen(\"config.ini\", \"w\");\n        if (file) {\n            fprintf(file, \"%s\", buf);\n            fclose(file);\n        }\n        #endif\n\n        // Clear config struct\n        memset(&config, 0, sizeof(config));\n\n        // Parse the configuration line by line\n        char *token = strtok(buf, \"\\n\");\n        while (token != NULL) {\n            parse(token, &config);\n            token = strtok(NULL, \"\\n\");\n        }\n    }\n\n    #if 0\n    // For debug\n    printf(\"general.num_version_config = %d\\n\", config.general.num_version_config);\n    printf(\"general.num_version_mb = %d\\n\", config.general.num_version_mb);\n    printf(\"general.num_version_soft = %s\\n\", config.general.num_version_soft);\n    printf(\"general.drone_serial = %s\\n\", config.general.drone_serial);\n    printf(\"general.soft_build_date = %s\\n\", config.general.soft_build_date);\n    printf(\"general.motor1_soft = %f\\n\", config.general.motor1_soft);\n    printf(\"general.motor1_hard = %f\\n\", config.general.motor1_hard);\n    printf(\"general.motor1_supplier = %f\\n\", config.general.motor1_supplier);\n    printf(\"general.motor2_soft = %f\\n\", config.general.motor2_soft);\n    printf(\"general.motor2_hard = %f\\n\", config.general.motor2_hard);\n    printf(\"general.motor2_supplier = %f\\n\", config.general.motor2_supplier);\n    printf(\"general.motor3_soft = %f\\n\", config.general.motor3_soft);\n    printf(\"general.motor3_hard = %f\\n\", config.general.motor3_hard);\n    printf(\"general.motor3_supplier = %f\\n\", config.general.motor3_supplier);\n    printf(\"general.motor4_soft = %f\\n\", config.general.motor4_soft);\n    printf(\"general.motor4_hard = %f\\n\", config.general.motor4_hard);\n    printf(\"general.motor4_supplier = %f\\n\", config.general.motor4_supplier);\n    printf(\"general.ardrone_name = %s\\n\", config.general.ardrone_name);\n    printf(\"general.flying_time = %d\\n\", config.general.flying_time);\n    printf(\"general.navdata_demo = %s\\n\", config.general.navdata_demo ? \"true\" : \"false\");\n    printf(\"general.com_watchdog = %d\\n\", config.general.com_watchdog);\n    printf(\"general.video_enable = %s\\n\", config.general.video_enable ? \"true\" : \"false\");\n    printf(\"general.vision_enable = %s\\n\", config.general.vision_enable ? 
\"true\" : \"false\");\n    printf(\"general.vbat_min = %d\\n\", config.general.vbat_min);\n    printf(\"general.localtime = %d\\n\", config.general.localtime);\n    printf(\"general.navdata_options = %d\\n\", config.general.navdata_options);\n    printf(\"general.gps_soft = %f\\n\", config.general.gps_soft);\n    printf(\"general.gps_hard = %f\\n\", config.general.gps_hard);\n    printf(\"general.localtime_zone = %s\\n\", config.general.localtime_zone);\n    printf(\"general.battery_type = %d\\n\", config.general.battery_type);\n    printf(\"control.accs_offset = {%f, %f, %f}\\n\", config.control.accs_offset[0], config.control.accs_offset[1], config.control.accs_offset[2]);\n    printf(\"control.accs_gains = { %f %f %f %f %f %f %f %f %f }\\n\", config.control.accs_gains[0], config.control.accs_gains[1], config.control.accs_gains[2], config.control.accs_gains[3], config.control.accs_gains[4], config.control.accs_gains[5], config.control.accs_gains[6], config.control.accs_gains[7], config.control.accs_gains[8]);\n    printf(\"control.gyros_offset = { %f %f %f }\\n\", config.control.gyros_offset[0], config.control.gyros_offset[1], config.control.gyros_offset[2]);\n    printf(\"control.gyros_gains = { %f %f %f }\\n\", config.control.gyros_gains[0], config.control.gyros_gains[1], config.control.gyros_gains[2]);\n    printf(\"control.gyros110_offset = { %f %f }\\n\", config.control.gyros110_offset[0], config.control.gyros110_offset[1]);\n    printf(\"control.gyros110_gains = { %f %f }\\n\", config.control.gyros110_gains[0], config.control.gyros110_gains[1]);\n    printf(\"control.magneto_offset = { %f %f %f }\\n\", config.control.magneto_offset[0], config.control.magneto_offset[1], config.control.magneto_offset[2]);\n    printf(\"control.magneto_radius = %f\\n\", config.control.magneto_radius);\n    printf(\"control.gyro_offset_thr_x = %f\\n\", config.control.gyro_offset_thr_x);\n    printf(\"control.gyro_offset_thr_y = %f\\n\", config.control.gyro_offset_thr_y);\n    printf(\"control.gyro_offset_thr_z = %f\\n\", config.control.gyro_offset_thr_z);\n    printf(\"control.pwm_ref_gyros = %d\\n\", config.control.pwm_ref_gyros);\n    printf(\"control.osctun_value = %d\\n\", config.control.osctun_value);\n    printf(\"control.osctun_test = %s\\n\", config.control.osctun_test ? \"true\" : \"false\");\n    printf(\"control.altitude_max = %d\\n\", config.control.altitude_max);\n    printf(\"control.altitude_min = %d\\n\", config.control.altitude_min);\n    printf(\"control.outdoor = %s\\n\", config.control.outdoor ? \"true\" : \"false\");\n    printf(\"control.flight_without_shell = %s\\n\", config.control.flight_without_shell ? \"true\" : \"false\");\n    printf(\"control.autonomous_flight = %s\\n\", config.control.autonomous_flight ? \"true\" : \"false\");\n    printf(\"control.flight_anim = %d,%d\\n\", config.control.flight_anim[0], config.control.flight_anim[1]);\n    printf(\"control.control_level = %d\\n\", config.control.control_level);\n    printf(\"control.euler_angle_max = %f\\n\", config.control.euler_angle_max);\n    printf(\"control.control_iphone_tilt = %f\\n\", config.control.control_iphone_tilt);\n    printf(\"control.control_vz_max = %f\\n\", config.control.control_vz_max);\n    printf(\"control.control_yaw = %f\\n\", config.control.control_yaw);\n    printf(\"control.manual_trim = %s\\n\", config.control.manual_trim ? 
\"true\" : \"false\");\n    printf(\"control.indoor_euler_angle_max = %f\\n\", config.control.indoor_euler_angle_max);\n    printf(\"control.indoor_control_vz_max = %f\\n\", config.control.indoor_control_vz_max);\n    printf(\"control.indoor_control_yaw = %f\\n\", config.control.indoor_control_yaw);\n    printf(\"control.outdoor_euler_angle_max = %f\\n\", config.control.outdoor_euler_angle_max);\n    printf(\"control.outdoor_control_vz_max = %f\\n\", config.control.outdoor_control_vz_max);\n    printf(\"control.outdoor_control_yaw = %f\\n\", config.control.outdoor_control_yaw);\n    printf(\"control.flying_mode = %d\\n\", config.control.flying_mode);\n    printf(\"control.hovering_range = %d\\n\", config.control.hovering_range);\n    printf(\"control.flying_camera_mode = %d,%d,%d,%d,%d,%d,%d,%d,%d,%d\\n\", config.control.flying_camera_mode[0], config.control.flying_camera_mode[1], config.control.flying_camera_mode[2], config.control.flying_camera_mode[3], config.control.flying_camera_mode[4], config.control.flying_camera_mode[5], config.control.flying_camera_mode[6], config.control.flying_camera_mode[7], config.control.flying_camera_mode[8], config.control.flying_camera_mode[9]);\n    printf(\"control.flying_camera_enable = %s\\n\", config.control.flying_camera_enable ? \"true\" : \"false\");\n    printf(\"network.ssid_single_player = %s\\n\", config.network.ssid_single_player);\n    printf(\"network.ssid_multi_player = %s\\n\", config.network.ssid_multi_player);\n    printf(\"network.wifi_mode = %d\\n\", config.network.wifi_mode);\n    printf(\"network.wifi_rate = %d\\n\", config.network.wifi_rate);\n    printf(\"network.owner_mac = %s\\n\", config.network.owner_mac);\n    printf(\"pic.ultrasound_freq = %d\\n\", config.pic.ultrasound_freq);\n    printf(\"pic.ultrasound_watchdog = %d\\n\", config.pic.ultrasound_watchdog);\n    printf(\"pic.pic_version = %d\\n\", config.pic.pic_version);\n    printf(\"video.camif_fps = %d\\n\", config.video.camif_fps);\n    printf(\"video.camif_buffers = %d\\n\", config.video.camif_buffers);\n    printf(\"video.num_trackers = %d\\n\", config.video.num_trackers);\n    printf(\"video.video_storage_space = %d\\n\", config.video.video_storage_space);\n    printf(\"video.video_on_usb = %s\\n\", config.video.video_on_usb ? 
\"true\" : \"false\");\n    printf(\"video.video_file_index = %d\\n\", config.video.video_file_index);\n    printf(\"video.bitrate = %d\\n\", config.video.bitrate);\n    printf(\"video.bitrate_ctrl_mode = %d\\n\", config.video.bitrate_ctrl_mode);\n    printf(\"video.bitrate_storage = %d\\n\", config.video.bitrate_storage);\n    printf(\"video.codec_fps = %d\\n\", config.video.codec_fps);\n    printf(\"video.video_codec = %d\\n\", config.video.video_codec);\n    printf(\"video.video_slices = %d\\n\", config.video.video_slices);\n    printf(\"video.video_live_socket = %d\\n\", config.video.video_live_socket);\n    printf(\"video.max_bitrate = %d\\n\", config.video.max_bitrate);\n    printf(\"video.video_channel = %d\\n\", config.video.video_channel);\n    printf(\"video.exposure_mode = %d,%d,%d,%d\\n\", config.video.exposure_mode[0], config.video.exposure_mode[1], config.video.exposure_mode[2], config.video.exposure_mode[3]);\n    printf(\"video.saturation_mode = %d\\n\", config.video.saturation_mode);\n    printf(\"video.whitebalance_mode = %d,%d\\n\", config.video.whitebalance_mode[0], config.video.whitebalance_mode[1]);\n    printf(\"leds.leds_anim = %d,%d,%d\\n\", config.leds.leds_anim[0], config.leds.leds_anim[1], config.leds.leds_anim[2]);\n    printf(\"detect.enemy_colors = %d\\n\", config.detect.enemy_colors);\n    printf(\"detect.enemy_without_shell = %d\\n\", config.detect.enemy_without_shell);\n    printf(\"detect.groundstripe_colors = %d\\n\", config.detect.groundstripe_colors);\n    printf(\"detect.detect_type = %d\\n\", config.detect.detect_type);\n    printf(\"detect.detections_select_h = %d\\n\", config.detect.detections_select_h);\n    printf(\"detect.detections_select_v_hsync = %d\\n\", config.detect.detections_select_v_hsync);\n    printf(\"detect.detections_select_v = %d\\n\", config.detect.detections_select_v);\n    printf(\"syslog.output = %d\\n\", config.syslog.output);\n    printf(\"syslog.max_size = %d\\n\", config.syslog.max_size);\n    printf(\"syslog.nb_files = %d\\n\", config.syslog.nb_files);\n    printf(\"custom.application_desc = %s\\n\", config.custom.application_desc);\n    printf(\"custom.profile_desc = %s\\n\", config.custom.profile_desc);\n    printf(\"custom.session_desc = %s\\n\", config.custom.session_desc);\n    printf(\"custom.application_id = %s\\n\", config.custom.application_id);\n    printf(\"custom.profile_id = %s\\n\", config.custom.profile_id);\n    printf(\"custom.session_id = %s\\n\", config.custom.session_id);\n    printf(\"userbox.userbox_cmd = %d\\n\", config.userbox.userbox_cmd);\n    printf(\"gps.latitude = %f\\n\", config.gps.latitude);\n    printf(\"gps.longitude = %f\\n\", config.gps.longitude);\n    printf(\"gps.altitude = %f\\n\", config.gps.altitude);\n    printf(\"gps.accuracy = %f\\n\", config.gps.accuracy);\n    printf(\"flightplan.default_validation_radius = %f\\n\", config.flightplan.default_validation_radius);\n    printf(\"flightplan.default_validation_time = %f\\n\", config.flightplan.default_validation_time);\n    printf(\"flightplan.max_distance_from_takeoff = %d\\n\", config.flightplan.max_distance_from_takeoff);\n    printf(\"flightplan.gcs_ip = %d\\n\", config.flightplan.gcs_ip);\n    printf(\"flightplan.video_stop_delay = %d\\n\", config.flightplan.video_stop_delay);\n    printf(\"flightplan.low_battery_go_home = %s\\n\", config.flightplan.low_battery_go_home ? \"true\" : \"false\");\n    printf(\"flightplan.automatic_heading = %s\\n\", config.flightplan.automatic_heading ? 
\"true\" : \"false\");\n    printf(\"flightplan.com_lost_action_delay = %d\\n\", config.flightplan.com_lost_action_delay);\n    printf(\"flightplan.altitude_go_home = %d\\n\", config.flightplan.altitude_go_home);\n    printf(\"flightplan.mavlink_js_roll_left = %s\\n\", config.flightplan.mavlink_js_roll_left);\n    printf(\"flightplan.mavlink_js_roll_right = %s\\n\", config.flightplan.mavlink_js_roll_right);\n    printf(\"flightplan.mavlink_js_pitch_front = %s\\n\", config.flightplan.mavlink_js_pitch_front);\n    printf(\"flightplan.mavlink_js_pitch_back = %s\\n\", config.flightplan.mavlink_js_pitch_back);\n    printf(\"flightplan.mavlink_js_yaw_left = %s\\n\", config.flightplan.mavlink_js_yaw_left);\n    printf(\"flightplan.mavlink_js_yaw_right = %s\\n\", config.flightplan.mavlink_js_yaw_right);\n    printf(\"flightplan.mavlink_js_go_up = %s\\n\", config.flightplan.mavlink_js_go_up);\n    printf(\"flightplan.mavlink_js_go_down = %s\\n\", config.flightplan.mavlink_js_go_down);\n    printf(\"flightplan.mavlink_js_inc_gains = %s\\n\", config.flightplan.mavlink_js_inc_gains);\n    printf(\"flightplan.mavlink_js_dec_gains = %s\\n\", config.flightplan.mavlink_js_dec_gains);\n    printf(\"flightplan.mavlink_js_select = %s\\n\", config.flightplan.mavlink_js_select);\n    printf(\"flightplan.mavlink_js_start = %s\\n\", config.flightplan.mavlink_js_start);\n    printf(\"rescue.rescue = %d\\n\", config.rescue.rescue);\n    #endif\n\n    // Finalize\n    sockConfig.close();\n\n    return 1;\n}"
  },
  {
    "path": "src/ardrone/navdata.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   navdata.cpp\n//! @brief  Navigation data\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n\n// --------------------------------------------------------------------------\n//! @brief   Initialize Navdata.\n//! @return  Result of initialization\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::initNavdata(void)\n{\n    // Open the IP address and port\n    if (!sockNavdata.open(ip, ARDRONE_NAVDATA_PORT)) {\n        CVDRONE_ERROR(\"UDPSocket::open(port=%d) failed. (%s, %d)\\n\", ARDRONE_NAVDATA_PORT, __FILE__, __LINE__);\n        return 0;\n    }\n\n    // Clear Navdata\n    memset(&navdata, 0, sizeof(navdata));\n\n    // Start Navdata\n    sockNavdata.sendf(\"\\x01\\x00\\x00\\x00\");\n\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        // Disable BOOTSTRAP mode\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        sockCommand.sendf(\"AT*CONFIG_IDS=%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\r\", ++seq, ARDRONE_SESSION_ID, ARDRONE_PROFILE_ID, ARDRONE_APPLOCATION_ID);\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"general:navdata_demo\\\",\\\"TRUE\\\"\\r\", ++seq);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"general:navdata_demo\\\",\\\"FALSE\\\"\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n        msleep(100);\n\n        // Send ACK\n        sockCommand.sendf(\"AT*CTRL=%d,0\\r\", ++seq);\n    }\n    // AR.Drone 1.0\n    else {\n        // Disable BOOTSTRAP mode\n        if (mutexCommand) pthread_mutex_lock(mutexCommand);\n        //sockCommand.sendf(\"AT*CONFIG=%d,\\\"general:navdata_demo\\\",\\\"TRUE\\\"\\r\", ++seq);\n        sockCommand.sendf(\"AT*CONFIG=%d,\\\"general:navdata_demo\\\",\\\"FALSE\\\"\\r\", ++seq);\n        if (mutexCommand) pthread_mutex_unlock(mutexCommand);\n\n        // Send ACK\n        sockCommand.sendf(\"AT*CTRL=%d,0\\r\", ++seq);\n    }\n\n    // Create a mutex\n    mutexNavdata = new pthread_mutex_t;\n    pthread_mutex_init(mutexNavdata, NULL);\n\n    // Create a thread\n    threadNavdata = new pthread_t;\n    if (pthread_create(threadNavdata, NULL, runNavdata, this) != 0) {\n        CVDRONE_ERROR(\"pthread_create() failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    return 1;\n}
\n\n// --------------------------------------------------------------------------\n//! @brief   Thread function for Navdata.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::loopNavdata(void)\n{\n    while (1) {\n        // Get Navdata\n        if (!getNavdata()) break;\n        pthread_testcancel();\n        msleep(10);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get current navigation data of AR.Drone.\n//! @return  Result of this function\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::getNavdata(void)\n{\n    // Send a request\n    sockNavdata.sendf(\"\\x01\\x00\\x00\\x00\");\n\n    // Receive data\n    char buf[4096] = {'\\0'};\n    int size = sockNavdata.receive((void*)&buf, sizeof(buf));\n\n    // Received something\n    if (size > 0) {\n        // Enable mutex lock\n        if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n\n        // Header\n        int index = 0;\n        memcpy((void*)&(navdata.header),         (const void*)(buf + index), 4); index += 4;\n        memcpy((void*)&(navdata.ardrone_state),  (const void*)(buf + index), 4); index += 4;\n        memcpy((void*)&(navdata.sequence),       (const void*)(buf + index), 4); index += 4;\n        memcpy((void*)&(navdata.vision_defined), (const void*)(buf + index), 4); index += 4;\n\n        // Parse navdata\n        while (index < size) {\n            // Tag and data size\n            unsigned short tmp_tag, tmp_size;\n            memcpy((void*)&tmp_tag,  (const void*)(buf + index), 2); index += 2;  // tag\n            memcpy((void*)&tmp_size, (const void*)(buf + index), 2); index += 2;  // size\n            // Rewind to the start of this option so the memcpy below copies the\n            // whole block (tag and size header included); tmp_size counts the header\n            index -= 4;\n\n            // Copy to NAVDATA structure\n            switch (tmp_tag) {\n                case ARDRONE_NAVDATA_DEMO_TAG:\n                    memcpy((void*)&(navdata.demo),            (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.demo)));\n                    break;\n                case ARDRONE_NAVDATA_TIME_TAG:\n                    memcpy((void*)&(navdata.time),            (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.time)));\n                    break;\n                case ARDRONE_NAVDATA_RAW_MEASURES_TAG:\n                    memcpy((void*)&(navdata.raw_measures),    (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.raw_measures)));\n                    break;\n                case ARDRONE_NAVDATA_PHYS_MEASURES_TAG:\n                    memcpy((void*)&(navdata.phys_measures),   (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.phys_measures)));\n                    break;\n                case ARDRONE_NAVDATA_GYROS_OFFSETS_TAG:\n                    memcpy((void*)&(navdata.gyros_offsets),   (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.gyros_offsets)));\n                    break;\n                case ARDRONE_NAVDATA_EULER_ANGLES_TAG:\n                    memcpy((void*)&(navdata.euler_angles),    (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.euler_angles)));\n                    break;\n                case ARDRONE_NAVDATA_REFERENCES_TAG:\n                    memcpy((void*)&(navdata.references),      (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.references)));\n                    break;\n                case ARDRONE_NAVDATA_TRIMS_TAG:\n  
                  memcpy((void*)&(navdata.trims),           (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.trims)));\n                    break;\n                case ARDRONE_NAVDATA_RC_REFERENCES_TAG:\n                    memcpy((void*)&(navdata.rc_references),   (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.rc_references)));\n                    break;\n                case ARDRONE_NAVDATA_PWM_TAG:\n                    memcpy((void*)&(navdata.pwm),             (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.pwm)));\n                    break;\n                case ARDRONE_NAVDATA_ALTITUDE_TAG:\n                    memcpy((void*)&(navdata.altitude),        (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.altitude)));\n                    break;\n                case ARDRONE_NAVDATA_VISION_RAW_TAG:\n                    memcpy((void*)&(navdata.vision_raw),      (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.vision_raw)));\n                    break;\n                case ARDRONE_NAVDATA_VISION_OF_TAG:\n                    memcpy((void*)&(navdata.vision_of),       (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.vision_of)));\n                    break;\n                case ARDRONE_NAVDATA_VISION_TAG:\n                    memcpy((void*)&(navdata.vision),          (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.vision)));\n                    break;\n                case ARDRONE_NAVDATA_VISION_PERF_TAG:\n                    memcpy((void*)&(navdata.vision_perf),     (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.vision_perf)));\n                    break;\n                case ARDRONE_NAVDATA_TRACKERS_SEND_TAG:\n                    memcpy((void*)&(navdata.trackers_send),   (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.trackers_send)));\n                    break;\n                case ARDRONE_NAVDATA_VISION_DETECT_TAG:\n                    memcpy((void*)&(navdata.vision_detect),   (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.vision_detect)));\n                    break;\n                case ARDRONE_NAVDATA_WATCHDOG_TAG:\n                    memcpy((void*)&(navdata.watchdog),        (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.watchdog)));\n                    break;\n                case ARDRONE_NAVDATA_ADC_DATA_FRAME_TAG:\n                    memcpy((void*)&(navdata.adc_data_frame),  (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.adc_data_frame)));\n                    break;\n                case ARDRONE_NAVDATA_VIDEO_STREAM_TAG:\n                    memcpy((void*)&(navdata.video_stream),    (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.video_stream)));\n                    break;\n                case ARDRONE_NAVDATA_GAME_TAG:\n                    memcpy((void*)&(navdata.games),           (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.games)));\n                    break;\n                case ARDRONE_NAVDATA_PRESSURE_RAW_TAG:\n                    memcpy((void*)&(navdata.pressure_raw),    (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.pressure_raw)));\n                    break;\n                case ARDRONE_NAVDATA_MAGNETO_TAG:\n                    memcpy((void*)&(navdata.magneto),         (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.magneto)));\n                    break;\n                case ARDRONE_NAVDATA_WIND_TAG:\n                    memcpy((void*)&(navdata.wind),            (const void*)(buf + index), MIN(tmp_size, 
sizeof(navdata.wind)));\n                    break;\n                case ARDRONE_NAVDATA_KALMAN_PRESSURE_TAG:\n                    memcpy((void*)&(navdata.kalman_pressure), (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.kalman_pressure)));\n                    break;\n                case ARDRONE_NAVDATA_HDVIDEO_STREAM_TAG:\n                    memcpy((void*)&(navdata.hdvideo_stream),  (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.hdvideo_stream)));\n                    break;\n                case ARDRONE_NAVDATA_WIFI_TAG:\n                    memcpy((void*)&(navdata.wifi),            (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.wifi)));\n                    break;\n                case ARDRONE_NAVDATA_GPS_TAG:\n                    if (version.major == 2 && version.minor == 4) memcpy((void*)&(navdata.gps),        (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.gps)));\n                    else                                          memcpy((void*)&(navdata.zimmu_3000), (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.zimmu_3000)));\n                    break;\n                // Unknown options 28 and 29 are skipped\n                case 28:\n                    break;\n                case 29:\n                    break;\n                default:\n                    memcpy((void*)&(navdata.cks),             (const void*)(buf + index), MIN(tmp_size, sizeof(navdata.cks)));\n                    break;\n            }\n            index += tmp_size;\n        }\n\n        // Disable mutex lock\n        if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n    }\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get current roll angle of AR.Drone.\n//! @return  Roll angle [rad]\n// --------------------------------------------------------------------------\ndouble ARDrone::getRoll(void)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    double roll = navdata.demo.phi * 0.001 * DEG_TO_RAD;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    return roll;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get current pitch angle of AR.Drone.\n//! @return  Pitch angle [rad]\n// --------------------------------------------------------------------------\ndouble ARDrone::getPitch(void)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    double pitch = -navdata.demo.theta * 0.001 * DEG_TO_RAD;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    return pitch;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get current yaw angle of AR.Drone.\n//! @return  Yaw angle [rad]\n// --------------------------------------------------------------------------\ndouble ARDrone::getYaw(void)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    double yaw = -navdata.demo.psi * 0.001 * DEG_TO_RAD;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    return yaw;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get current altitude of AR.Drone.\n
//! @return  Altitude [m]\n// --------------------------------------------------------------------------\ndouble ARDrone::getAltitude(void)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    double altitude = navdata.demo.altitude * 0.001;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    return altitude;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get estimated velocity of AR.Drone.\n//! @param   vx A pointer to the X velocity variable [m/s]\n//! @param   vy A pointer to the Y velocity variable [m/s]\n//! @param   vz A pointer to the Z velocity variable [m/s]\n//! @return  Velocity [m/s]\n// --------------------------------------------------------------------------\ndouble ARDrone::getVelocity(double *vx, double *vy, double *vz)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    double velocity_x =  navdata.demo.vx * 0.001;\n    double velocity_y = -navdata.demo.vy * 0.001;\n    //double velocity_z = -navdata.demo.vz * 0.001;\n    double velocity_z = -navdata.altitude.altitude_vz * 0.001;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    // Velocities\n    if (vx) *vx = velocity_x;\n    if (vy) *vy = velocity_y;\n    if (vz) *vz = velocity_z;\n\n    // Velocity [m/s]\n    double velocity = sqrt(velocity_x*velocity_x + velocity_y*velocity_y + velocity_z*velocity_z);\n    return velocity;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get GPS position.\n//! @note    This function requires the AR.Drone 2.0 Flight Recorder\n//! @param   latitude A pointer to the latitude variable [deg]\n//! @param   longitude A pointer to the longitude variable [deg]\n//! @param   elevation A pointer to the elevation variable [m]\n//! @return  Result of this function\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::getPosition(double *latitude, double *longitude, double *elevation)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    double gps_latitude  = navdata.gps.lat;\n    double gps_longitude = navdata.gps.lon;\n    double gps_elevation = navdata.gps.elevation;\n    int    available     = navdata.gps.data_available;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    // Positions\n    if (latitude)  *latitude  = gps_latitude;\n    if (longitude) *longitude = gps_longitude;\n    if (elevation) *elevation = gps_elevation;\n\n    return available;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get current battery percentage of AR.Drone.\n//! @return  Battery percentage [%]\n// --------------------------------------------------------------------------\nint ARDrone::getBatteryPercentage(void)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    int battery = navdata.demo.vbat_flying_percentage;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    return battery;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Check whether AR.Drone is on the ground.\n//! @return  Result of this function\n//! @retval  1 Yes\n
//! @retval  0 No\n// --------------------------------------------------------------------------\nint ARDrone::onGround(void)\n{\n    // Get the data\n    if (mutexNavdata) pthread_mutex_lock(mutexNavdata);\n    int on_ground = (navdata.ardrone_state & ARDRONE_FLY_MASK) ? 0 : 1;\n    if (mutexNavdata) pthread_mutex_unlock(mutexNavdata);\n\n    return on_ground;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Finalize Navdata.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::finalizeNavdata(void)\n{\n    // Destroy the thread\n    if (threadNavdata) {\n        pthread_cancel(*threadNavdata);\n        pthread_join(*threadNavdata, NULL);\n        delete threadNavdata;\n        threadNavdata = NULL;\n    }\n\n    // Delete the mutex\n    if (mutexNavdata) {\n        pthread_mutex_destroy(mutexNavdata);\n        delete mutexNavdata;\n        mutexNavdata = NULL;\n    }\n\n    // Close the socket\n    sockNavdata.close();\n}
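\n\n// --------------------------------------------------------------------------\n// Usage sketch (illustration only, not part of the library): reading the\n// mutex-protected state kept fresh by the Navdata thread. Assumes an\n// already-connected ARDrone instance named `ardrone`.\n//\n//   double roll  = ardrone.getRoll();                     // [rad]\n//   double pitch = ardrone.getPitch();                    // [rad]\n//   double yaw   = ardrone.getYaw();                      // [rad]\n//   double vx, vy, vz;\n//   double speed = ardrone.getVelocity(&vx, &vy, &vz);    // [m/s]\n//   int battery  = ardrone.getBatteryPercentage();        // [%]\n// --------------------------------------------------------------------------"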
  },
  {
    "path": "src/ardrone/tcp.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   tcp.cpp\n//! @brief  TCP Socket class\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n\n// --------------------------------------------------------------------------\n// TCPSocket::TCPSocket()\n// Description : Constructor of TCPSocket class.\n// --------------------------------------------------------------------------\nTCPSocket::TCPSocket()\n{\n    sock = INVALID_SOCKET;\n}\n\n// --------------------------------------------------------------------------\n// TCPSocket::~TCPSocket()\n// Description : Destructor of TCPSocket class.\n// --------------------------------------------------------------------------\nTCPSocket::~TCPSocket()\n{\n    close();\n}\n\n// --------------------------------------------------------------------------\n// TCPSocket::open(IP address, Port number)\n// Description  : Initialize the specified socket.\n// Return value : SUCCESS: 1  FAILURE: 0\n// --------------------------------------------------------------------------\nint TCPSocket::open(const char *addr, int port)\n{\n    #if _WIN32\n    // Initialize WSA\n    WSAData wsaData;\n    WSAStartup(MAKEWORD(1,1), &wsaData);\n    #endif\n\n    // Create a socket\n    sock = socket(AF_INET, SOCK_STREAM, 0);\n    if (sock == INVALID_SOCKET) {\n        printf(\"ERROR: socket() failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    // Set the port and address of server\n    memset(&server_addr, 0, sizeof(server_addr));\n    server_addr.sin_family = AF_INET;\n    server_addr.sin_port = htons((u_short)port);\n    server_addr.sin_addr.s_addr = inet_addr(addr);\n\n    // Connect the socket\n    if (connect(sock, (sockaddr*)&server_addr, sizeof(server_addr)) == SOCKET_ERROR) {\n        printf(\"ERROR: connect() failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    // Set a 100 ms send/receive timeout\n    int timeoutSec = 0, timeoutUsec = 100000;\n    #ifdef _WIN32\n    int timeout = (1000 * timeoutSec) + (timeoutUsec / 1000);\n    #else\n    struct timeval timeout;\n    timeout.tv_sec = timeoutSec;\n    timeout.tv_usec = timeoutUsec;\n    #endif\n    if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&timeout, sizeof(timeout)) < 0) {\n        printf(\"ERROR: setsockopt() failed. 
(%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n    if (setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&timeout, sizeof(timeout)) < 0) {\n        printf(\"ERROR: setsockopt() failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    //// Set to non-blocking mode\n    //#if _WIN32\n    //u_long nonblock = 1;\n    //if (ioctlsocket(sock, FIONBIO, &nonblock) == SOCKET_ERROR) {\n    //    printf(\"ERROR: ioctlsocket() failed. (%s, %d)\\n\", __FILE__, __LINE__);  \n    //    return 0;\n    //}\n    //#else\n    //int flag = fcntl(sock, F_GETFL, 0);\n    //if (flag < 0) {\n    //    printf(\"ERROR: fcntl(F_GETFL) failed. (%s, %d)\\n\", __FILE__, __LINE__);  \n    //    return 0;\n    //}\n    //if (fcntl(sock, F_SETFL, flag|O_NONBLOCK) < 0) {\n    //    printf(\"ERROR: fcntl(F_SETFL) failed. (%s, %d)\\n\", __FILE__, __LINE__);  \n    //    return 0;\n    //}\n    //#endif\n\n    // Enable re-use address option\n    int reuse = 1;\n    if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse)) == SOCKET_ERROR) {\n        printf(\"ERROR: setsockopt() failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n// TCPSocket::send2(Sending data, Size of data)\n// Description  : Send the specified data.\n// Return value : SUCCESS: Number of sent bytes  FAILURE: 0\n// --------------------------------------------------------------------------\nint TCPSocket::send2(void *data, size_t size)\n{\n    // The socket is invalid\n    if (sock == INVALID_SOCKET) return 0;\n\n    // Send the data\n    int n = (int)send(sock, (char*)data, size, 0);\n    if (n < 1) return 0;\n\n    return n;\n}\n\n// --------------------------------------------------------------------------\n// TCPSocket::sendf(Messages)\n// Description  : Send the data with format.\n// Return value : SUCCESS: Number of sent bytes  FAILURE: 0\n// --------------------------------------------------------------------------\nint TCPSocket::sendf(const char *str, ...)\n{\n    char msg[1024];\n\n    // The socket is invalid\n    if (sock == INVALID_SOCKET) return 0;\n\n    // Apply format \n    va_list arg;\n    va_start(arg, str);\n    vsnprintf(msg, 1024, str, arg);\n    va_end(arg);\n\n    // Send data\n    return send2(msg, (int)strlen(msg) + 1);\n}\n\n// --------------------------------------------------------------------------\n// TCPSocket::receive(Receiving data, Size of data)\n// Description  : Receive the data.\n// Return value : SUCCESS: Number of received bytes  FAILURE: 0\n// --------------------------------------------------------------------------\nint TCPSocket::receive(void *data, size_t size)\n{\n    // The socket is invalid\n    if (sock == INVALID_SOCKET) return 0;\n\n    // Receive data\n    int received = 0;\n    while (received < (int)size) {\n        int n = (int)recv(sock, (char*)data + received, size - received, 0);\n        if (n < 1) break;\n        received += n;\n    }\n\n    return received;\n}\n\n// --------------------------------------------------------------------------\n// TCPSocket::close()\n// Description  : Finalize the socket.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid TCPSocket::close(void)\n{\n    // Close the socket\n    if (sock != INVALID_SOCKET) {\n        #if _WIN32\n        closesocket(sock);\n        #else\n        ::close(sock);\n        #endif\n        sock = 
INVALID_SOCKET;\n    }\n\n    #if _WIN32\n    // Finalize WSA\n    WSACleanup();\n    #endif\n}"
  },
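A note on the receive semantics above, since the TCP and UDP classes in this library differ: TCPSocket::receive() keeps calling recv() until the caller's buffer is full, so in practice the 100 ms SO_RCVTIMEO configured in open() is what ends a short read. A minimal sketch, assuming the stock AR.Drone address 192.168.1.1 (defined elsewhere in this repo, not in tcp.cpp) and reusing ARDRONE_FTP_PORT as version.cpp does:

```cpp
// Sketch only: grab the drone's FTP welcome banner with TCPSocket.
// 192.168.1.1 is an assumption (the stock AR.Drone address).
#include "ardrone.h"

int main(void)
{
    TCPSocket ftp;
    if (!ftp.open("192.168.1.1", ARDRONE_FTP_PORT)) return -1;

    // receive() loops on recv() until the buffer is full, so the 100 ms
    // SO_RCVTIMEO set in open() is what terminates a short banner read.
    char banner[1024] = {0};
    int n = ftp.receive(banner, sizeof(banner) - 1);  // keep room for '\0'
    printf("%d bytes: %s", n, banner);

    ftp.close();  // ~TCPSocket() would also do this
    return 0;
}
```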
  {
    "path": "src/ardrone/udp.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   udp.cpp\n//! @brief  UDP Socket class\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n\n// --------------------------------------------------------------------------\n// UDPSocket::UDPSocket()\n// Description : Constructor of UDPSocket class.\n// --------------------------------------------------------------------------\nUDPSocket::UDPSocket()\n{\n    sock = INVALID_SOCKET;\n}\n\n// --------------------------------------------------------------------------\n// UDPSocket::~UDPSocket()\n// Description : Destructor of UDPSocket class.\n// --------------------------------------------------------------------------\nUDPSocket::~UDPSocket()\n{\n    close();\n}\n\n// --------------------------------------------------------------------------\n// UDPSocket::open(IP address, Port number)\n// Description  : Initialize specified  socket.\n// Return value : SUCCESS: 1  FAILURE: 0\n// --------------------------------------------------------------------------\nint UDPSocket::open(const char *addr, int port)\n{\n    #if _WIN32\n    // Initialize WSA\n    WSAData wsaData;\n    WSAStartup(MAKEWORD(1,1), &wsaData);\n    #endif\n\n    // Create a socket\n    sock = socket(AF_INET, SOCK_DGRAM, 0);\n    if (sock == INVALID_SOCKET) {\n        printf(\"ERROR: socket() failed. (%s, %d)\\n\", __FILE__, __LINE__);     \n        return 0;\n    }\n\n    // Set the port and address of server\n    memset(&server_addr, 0, sizeof(server_addr));\n    server_addr.sin_family = AF_INET;\n    server_addr.sin_port = htons((u_short)port);\n    server_addr.sin_addr.s_addr = inet_addr(addr);\n\n    // Set the port and address of client\n    memset(&client_addr, 0, sizeof(client_addr));\n    client_addr.sin_family = AF_INET;\n    client_addr.sin_port = htons(0);\n    client_addr.sin_addr.s_addr = htonl(INADDR_ANY);\n\n    // Bind the socket\n    if (bind(sock, (sockaddr*)&client_addr, sizeof(client_addr)) == SOCKET_ERROR) {\n        printf(\"ERROR: bind() failed. (%s, %d)\\n\", __FILE__, __LINE__);  \n        return 0;\n    }\n\n    //// Set to non-blocking mode\n    //#if _WIN32\n    //u_long nonblock = 1;\n    //if (ioctlsocket(sock, FIONBIO, &nonblock) == SOCKET_ERROR) {\n    //    printf(\"ERROR: ioctlsocket() failed. 
(%s, %d)\\n\", __FILE__, __LINE__);  \n    //    return 0;\n    //}\n    //#else\n    //int flag = fcntl(sock, F_GETFL, 0);\n    //if (flag < 0) {\n    //    printf(\"ERROR: fcntl(F_GETFL) failed. (%s, %d)\\n\", __FILE__, __LINE__);  \n    //    return 0;\n    //}\n    //if (fcntl(sock, F_SETFL, flag|O_NONBLOCK) < 0) {\n    //    printf(\"ERROR: fcntl(F_SETFL) failed. (%s, %d)\\n\", __FILE__, __LINE__);  \n    //    return 0;\n    //}\n    //#endif\n\n    // Enable re-use address option\n    int reuse = 1;\n    if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse)) == SOCKET_ERROR) {\n        printf(\"ERROR: setsockopt() failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n// UDPSocket:::send2(Sending data, Size of data)\n// Description  : Send the specified data.\n// Return value : SUCCESS: Number of sent bytes  FAILURE: 0\n// --------------------------------------------------------------------------\nint UDPSocket::send2(void *data, size_t size)\n{\n    // The socket is invalid\n    if (sock == INVALID_SOCKET) return 0;\n\n    // Send data\n    int n = (int)sendto(sock, (char*)data, size, 0, (sockaddr*)&server_addr, sizeof(server_addr));\n    if (n < 1) return 0;\n\n    return n;\n}\n\n// --------------------------------------------------------------------------\n// UDPSocket::sendf(Messages)\n// Description  : Send the data with format.\n// Return value : SUCCESS: Number of sent bytes  FAILURE: 0\n// --------------------------------------------------------------------------\nint UDPSocket::sendf(const char *str, ...)\n{\n    char msg[1024];\n\n    // The socket is invalid\n    if (sock == INVALID_SOCKET) return 0;\n\n    // Apply format \n    va_list arg;\n    va_start(arg, str);\n    vsnprintf(msg, 1024, str, arg);\n    va_end(arg);\n      \n    // Send data\n    return send2(msg, (int)strlen(msg) + 1);\n}\n\n// --------------------------------------------------------------------------\n// UDPSocket::receive(Receiving data, Size of data)\n// Description  : Receive the data.\n// Return value : SUCCESS: Number of received bytes  FAILURE: 0\n// --------------------------------------------------------------------------\nint UDPSocket::receive(void *data, size_t size)\n{\n    // The socket is invalid.\n    if (sock == INVALID_SOCKET) return 0;\n\n    // Receive data\n    sockaddr_in addr;\n    socklen_t len = sizeof(addr);\n    int n = (int)recvfrom(sock, (char*)data, size, 0, (sockaddr*)&addr, &len);\n    if (n < 1) return 0;\n\n    // Server has the same IP address of client\n    //if (addr.sin_addr.S_un.S_addr != server_addr.sin_addr.S_un.S_addr) return 0;\n\n    return n;\n}\n\n// --------------------------------------------------------------------------\n// UDPSocket::close()\n// Description  : Finalize the socket.\n// Return value : NONE\n// --------------------------------------------------------------------------\nvoid UDPSocket::close(void)\n{\n    // Close the socket\n    if (sock != INVALID_SOCKET) {\n        #if _WIN32\n        closesocket(sock);\n        #else\n        ::close(sock);\n        #endif\n        sock = INVALID_SOCKET;\n    }\n\n    #if _WIN32\n    // Finalize WSA\n    WSACleanup();\n    #endif\n}"
  },
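To make the send path above concrete, here is a minimal sketch of driving UDPSocket by hand. Port 5556 and the AT*FTRIM syntax are assumptions about the AR.Drone firmware (the library's real command channel lives in command.cpp, not shown here); all the sketch relies on from udp.cpp is that sendf() formats like printf and that send2() transmits strlen(msg) + 1 bytes, so the terminating '\0' travels with the datagram.

```cpp
// Sketch only: a hand-rolled AT command over UDPSocket. The address,
// port 5556, and AT*FTRIM string are assumptions about the drone's
// protocol, not anything udp.cpp itself defines.
#include "ardrone.h"

int main(void)
{
    UDPSocket sock;
    if (!sock.open("192.168.1.1", 5556)) return -1;

    // Flat-trim request: sendf() -> vsnprintf() -> send2(strlen + 1 bytes)
    int seq = 1;
    sock.sendf("AT*FTRIM=%d,\r", seq++);

    sock.close();  // ~UDPSocket() would also close it
    return 0;
}
```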
  {
    "path": "src/ardrone/uvlc.h",
    "content": "#ifndef __HEADER_UVLC__\r\n#define __HEADER_UVLC__\r\n\r\n//#region Copyright Notice\r\n\r\n//Copyright  2007-2011, PARROT SA, all rights reserved. \r\n\r\n//DISCLAIMER \r\n//The APIs is provided by PARROT and contributors \"AS IS\" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability \r\n//and fitness for a particular purpose are disclaimed. In no event shall PARROT and contributors be liable for any direct, indirect, incidental, special, exemplary, or \r\n//consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however \r\n//caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this \r\n//software, even if advised of the possibility of such damage. \r\n\r\n//Author            : Daniel Schmidt\r\n//Publishing date   : 2010-01-06 \r\n//based on work by  : Wilke Jansoone\r\n\r\n//Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions\r\n//are met:\r\n//- Redistributions of source code must retain the above copyright notice, this list of conditions, the disclaimer and the original author of the source code.\r\n//- Neither the name of the PixVillage Team, nor the names of its contributors may be used to endorse or promote products derived from this software without \r\n//specific prior written permission.\r\n\r\n////#endregion\r\n\r\n////#region Imports\r\n\r\n#include <inttypes.h>\r\n\r\nnamespace UVLC {\r\n    const int BLOCK_WIDTH = 8;\r\n    const int CIF_WIDTH   = 88;\r\n    const int CIG_HEIGHT  = 72;\r\n    const int VGA_WIDTH   = 160;\r\n    const int VGA_HEIGHT  = 120;\r\n    const int CIF         = 1;\r\n    const int QVGA        = 2;\r\n    const int TABLE_QUANTIZATION_MODE = 31;\r\n    const int16_t ZIGZAG_POSITIONS[] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63, };\r\n    const int16_t QUANTIZER_VALUES[] = { 3, 5, 7, 9, 11, 13, 15, 17, 5, 7, 9, 11, 13, 15, 17, 19, 7, 9, 11, 13, 15, 17, 19, 21, 9, 11, 13, 15, 17, 19, 21, 23, 11, 13, 15, 17, 19, 21, 23, 25, 13, 15, 17, 19, 21, 23, 25, 27, 15, 17, 19, 21, 23, 25, 27, 29, 17, 19, 21, 23, 25, 27, 29, 31 };\r\n    const uint8_t CLZLUT[] = { 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\r\n\r\n    class MacroBlock {\r\n    public:\r\n        int16_t *DataBlocks[6];\r\n        MacroBlock(void);\r\n        ~MacroBlock(void);\r\n    };\r\n\r\n    class ImageSlice {\r\n    
public:\r\n        int Count;\r\n        MacroBlock *MacroBlocks;\r\n        ImageSlice(int);\r\n        ~ImageSlice(void);\r\n    };\r\n\r\n    MacroBlock::MacroBlock(void) {\r\n        for (int i = 0; i < 6; i++) this->DataBlocks[i] = new int16_t[64];\r\n    }\r\n\r\n    MacroBlock::~MacroBlock(void) {\r\n        for (int i = 0; i < 6; i++) delete [] this->DataBlocks[i];\r\n    }\r\n\r\n    ImageSlice::ImageSlice(int macroBlockCount) {\r\n        this->Count = macroBlockCount;\r\n        this->MacroBlocks = new MacroBlock[macroBlockCount];\r\n    }\r\n\r\n    ImageSlice::~ImageSlice(void) {\r\n        delete [] this->MacroBlocks;\r\n    }\r\n\r\n    uint32_t PeekStreamData(uint8_t *stream, int stream_size, int streamIndex, int streamField, int streamFieldBitIndex, int count)\r\n    {\r\n        uint32_t data = 0;\r\n        uint32_t _streamField = (uint32_t)streamField;\r\n        int _streamFieldBitIndex = streamFieldBitIndex;\r\n\r\n        while (count > (32 - _streamFieldBitIndex) && streamIndex < (stream_size >> 2)) {\r\n            data = (data << (int)(32 - _streamFieldBitIndex)) | (_streamField >> _streamFieldBitIndex);\r\n            count -= 32 - _streamFieldBitIndex;\r\n            _streamField = ((stream[streamIndex * 4] & 0xFF) | ((stream[streamIndex * 4 + 1] & 0xFF) << 8) | ((stream[streamIndex * 4 + 2] & 0xFF) << 16) | ((stream[streamIndex * 4 + 3] & 0xFF) << 24));\r\n            _streamFieldBitIndex = 0;\r\n        }\r\n\r\n        if (count > 0) data = (data << count) | (_streamField >> (32 - count));\r\n        return data;\r\n    }\r\n\r\n    int ReadStreamData(uint8_t *stream, int stream_size, int *streamIndex, int *streamField, int *streamFieldBitIndex, int count)\r\n    {\r\n        int data = 0;\r\n        while (count > (32 - *streamFieldBitIndex)) {\r\n            data = data << (32 - *streamFieldBitIndex) | ((uint32_t)(*streamField) >> *streamFieldBitIndex);\r\n            count -= 32 - *streamFieldBitIndex;\r\n            *streamField = ((stream[*streamIndex * 4] & 0xFF) | ((stream[*streamIndex * 4 + 1] & 0xFF) << 8) | ((stream[*streamIndex * 4 + 2] & 0xFF) << 16) | ((stream[*streamIndex * 4 + 3] & 0xFF) << 24));\r\n            *streamFieldBitIndex = 0;\r\n            *streamIndex += 1;\r\n        }\r\n        if (count > 0) {\r\n            data = (data << count) | ((uint32_t)(*streamField) >> (32 - count));\r\n            *streamField <<= count;\r\n            *streamFieldBitIndex += count;\r\n        }\r\n        return data;\r\n    }\r\n\r\n    void AlignStreamData(int *streamField, int *streamFieldBitIndex)\r\n    {\r\n        int alignedLength;\r\n        int actualLength = *streamFieldBitIndex;\r\n\r\n        if (actualLength > 0) {\r\n            alignedLength = (actualLength & ~7);\r\n            if (alignedLength != actualLength) {\r\n                alignedLength += 0x08;\r\n                *streamField <<= (alignedLength - actualLength);\r\n                *streamFieldBitIndex = alignedLength;\r\n            }\r\n        }\r\n    }\r\n\r\n    bool DecodeFieldBytes(uint8_t *stream, int stream_size, int *streamIndex, int *streamField, int *streamFieldBitIndex, int *run, int *level)\r\n    {\r\n        bool last = false;\r\n        int streamLength, temp;\r\n        uint32_t streamCode = PeekStreamData(stream, stream_size, *streamIndex, *streamField, *streamFieldBitIndex, 32);\r\n        int zeroCount = CLZLUT[streamCode >> 24];\r\n        if (zeroCount == 8) {\r\n            zeroCount += CLZLUT[(streamCode >> 16) & 0xFF];\r\n            if (zeroCount 
== 16) {\r\n                zeroCount += CLZLUT[(streamCode >> 8) & 0xFF];\r\n                if (zeroCount == 24) {\r\n                    zeroCount += CLZLUT[streamCode & 0xFF];\r\n                }\r\n            }\r\n        }\r\n\r\n        if (zeroCount > 1) {\r\n            temp = (streamCode << (zeroCount + 1)) >> (32 - (zeroCount - 1));\r\n            streamCode <<= 2*zeroCount;\r\n            streamLength = 2*zeroCount;\r\n            *run = temp + (1 << (zeroCount - 1));\r\n        }\r\n        else {\r\n            streamCode <<= (zeroCount + 1);\r\n            streamLength = zeroCount + 1;\r\n            *run = zeroCount;\r\n        }\r\n\r\n        zeroCount = CLZLUT[streamCode >> 24];\r\n        if (zeroCount == 8) {\r\n            zeroCount += CLZLUT[(streamCode >> 16) & 0xFF];\r\n            if (zeroCount == 16) {\r\n                zeroCount += CLZLUT[(streamCode >> 8) & 0xFF];\r\n                if (zeroCount == 24) {\r\n                    zeroCount += CLZLUT[streamCode & 0xFF];\r\n                }\r\n            }\r\n        }\r\n\r\n        if (zeroCount == 1) {\r\n            streamCode <<= 2;\r\n            streamLength += 2;\r\n            last = true;\r\n        }\r\n        else {\r\n            if (zeroCount == 0) {\r\n                streamLength += 2;\r\n                streamCode = (streamCode << 1) >> 31;\r\n                temp = (streamCode >> 1) + 1;\r\n            }\r\n            else {\r\n                streamLength += 2*zeroCount + 1;\r\n                streamCode = (streamCode << (zeroCount + 1)) >> (32 - zeroCount);\r\n                temp = streamCode >> 1;\r\n                temp += (int)(1 << (zeroCount - 1));\r\n            }\r\n\r\n            int sign = streamCode & 1;\r\n\r\n            *level = (sign == 1) ? 
-temp : temp;\r\n            last = false;\r\n        }\r\n\r\n        ReadStreamData(stream, stream_size, streamIndex, streamField, streamFieldBitIndex, streamLength);\r\n        return last;\r\n    }\r\n\r\n    void GetBlockBytes(uint8_t *stream, int stream_size, int16_t *dataBlockBuffer, int dataBlockBufferLength, int *streamIndex, int *streamField, int *streamFieldBitIndex, int quantizerMode, bool acCoefficientsAvailable)\r\n    {\r\n        bool last = false;\r\n        int run, level;\r\n        int zigZagPosition = 0;\r\n        int matrixPosition = 0;\r\n\r\n        memset(dataBlockBuffer, 0, dataBlockBufferLength*sizeof(int16_t));\r\n\r\n        int dcCoefficientTemp = ReadStreamData(stream, stream_size, streamIndex, streamField, streamFieldBitIndex, 10);\r\n\r\n        if (quantizerMode == TABLE_QUANTIZATION_MODE) {\r\n            dataBlockBuffer[0] = (int16_t)(dcCoefficientTemp * QUANTIZER_VALUES[0]);\r\n\r\n            if (acCoefficientsAvailable) {\r\n                last = DecodeFieldBytes(stream, stream_size, streamIndex, streamField, streamFieldBitIndex, &run, &level);\r\n\r\n                while (!last) {\r\n                    zigZagPosition += run + 1;\r\n                    matrixPosition = ZIGZAG_POSITIONS[zigZagPosition];\r\n                    level *= QUANTIZER_VALUES[matrixPosition];\r\n                    dataBlockBuffer[matrixPosition] = (int16_t)level;\r\n                    last = DecodeFieldBytes(stream, stream_size, streamIndex, streamField, streamFieldBitIndex, &run, &level);\r\n                }\r\n            }\r\n        }\r\n        else {\r\n            // Other quantization modes are currently not implemented;\r\n            // the block stays zeroed except for its DC coefficient slot.\r\n        }\r\n    }\r\n\r\n    void InverseTransform(int16_t *src, int16_t *dst)\r\n    {\r\n        const int FIX_0_298631336 = 2446;\r\n        const int FIX_0_390180644 = 3196;\r\n        const int FIX_0_541196100 = 4433;\r\n        const int FIX_0_765366865 = 6270;\r\n        const int FIX_0_899976223 = 7373;\r\n        const int FIX_1_175875602 = 9633;\r\n        const int FIX_1_501321110 = 12299;\r\n        const int FIX_1_847759065 = 15137;\r\n        const int FIX_1_961570560 = 16069;\r\n        const int FIX_2_053119869 = 16819;\r\n        const int FIX_2_562915447 = 20995;\r\n        const int FIX_3_072711026 = 25172;\r\n        const int BITS = 13;\r\n        const int PASS1_BITS = 1;\r\n        const int F1 = BITS - PASS1_BITS - 1;\r\n        const int F2 = BITS - PASS1_BITS;\r\n        const int F3 = BITS + PASS1_BITS + 3;\r\n        int z1, z2, z3, z4, z5;\r\n        int tmp0, tmp1, tmp2, tmp3;\r\n        int tmp10, tmp11, tmp12, tmp13;\r\n        int pointer;\r\n        int workSpace[64];\r\n\r\n        for (pointer = 0; pointer < 8; pointer++) {\r\n            if (src[pointer + 8] == 0 && src[pointer + 16] == 0 && src[pointer + 24] == 0 && src[pointer + 32] == 0 && src[pointer + 40] == 0 && src[pointer + 48] == 0 && src[pointer + 56] == 0) {\r\n                int dcValue = src[pointer] << PASS1_BITS;\r\n                workSpace[pointer +  0] = dcValue;\r\n                workSpace[pointer +  8] = dcValue;\r\n                workSpace[pointer + 16] = dcValue;\r\n                workSpace[pointer + 24] = dcValue;\r\n                workSpace[pointer + 32] = dcValue;\r\n                workSpace[pointer + 40] = dcValue;\r\n                workSpace[pointer + 48] = dcValue;\r\n                workSpace[pointer + 56] = dcValue;\r\n            }\r\n            else {\r\n                z2 = src[pointer + 16];\r\n         
       z3 = src[pointer + 48];\r\n\r\n                z1 = (z2 + z3) * FIX_0_541196100;\r\n                tmp2 = z1 + z3 * -FIX_1_847759065;\r\n                tmp3 = z1 + z2 * FIX_0_765366865;\r\n\r\n                z2 = src[pointer];\r\n                z3 = src[pointer + 32];\r\n\r\n                tmp0 = (z2 + z3) << BITS;\r\n                tmp1 = (z2 - z3) << BITS;\r\n\r\n                tmp10 = tmp0 + tmp3;\r\n                tmp13 = tmp0 - tmp3;\r\n                tmp11 = tmp1 + tmp2;\r\n                tmp12 = tmp1 - tmp2;\r\n\r\n                tmp0 = src[pointer + 56];\r\n                tmp1 = src[pointer + 40];\r\n                tmp2 = src[pointer + 24];\r\n                tmp3 = src[pointer + 8];\r\n\r\n                z1 = tmp0 + tmp3;\r\n                z2 = tmp1 + tmp2;\r\n                z3 = tmp0 + tmp2;\r\n                z4 = tmp1 + tmp3;\r\n                z5 = (z3 + z4) * FIX_1_175875602;\r\n\r\n                tmp0 = tmp0 * FIX_0_298631336;\r\n                tmp1 = tmp1 * FIX_2_053119869;\r\n                tmp2 = tmp2 * FIX_3_072711026;\r\n                tmp3 = tmp3 * FIX_1_501321110;\r\n                z1 = z1 * -FIX_0_899976223;\r\n                z2 = z2 * -FIX_2_562915447;\r\n                z3 = z3 * -FIX_1_961570560;\r\n                z4 = z4 * -FIX_0_390180644;\r\n\r\n                z3 += z5;\r\n                z4 += z5;\r\n\r\n                tmp0 += z1 + z3;\r\n                tmp1 += z2 + z4;\r\n                tmp2 += z2 + z3;\r\n                tmp3 += z1 + z4;\r\n\r\n                workSpace[pointer +  0] = ((tmp10 + tmp3 + (1 << F1)) >> F2);\r\n                workSpace[pointer + 56] = ((tmp10 - tmp3 + (1 << F1)) >> F2);\r\n                workSpace[pointer +  8] = ((tmp11 + tmp2 + (1 << F1)) >> F2);\r\n                workSpace[pointer + 48] = ((tmp11 - tmp2 + (1 << F1)) >> F2);\r\n                workSpace[pointer + 16] = ((tmp12 + tmp1 + (1 << F1)) >> F2);\r\n                workSpace[pointer + 40] = ((tmp12 - tmp1 + (1 << F1)) >> F2);\r\n                workSpace[pointer + 24] = ((tmp13 + tmp0 + (1 << F1)) >> F2);\r\n                workSpace[pointer + 32] = ((tmp13 - tmp0 + (1 << F1)) >> F2);\r\n            }\r\n        }\r\n\r\n        for (pointer = 0; pointer < 64; pointer += 8) {\r\n            z2 = workSpace[pointer + 2];\r\n            z3 = workSpace[pointer + 6];\r\n\r\n            z1 = (z2 + z3) * FIX_0_541196100;\r\n            tmp2 = z1 + z3 * -FIX_1_847759065;\r\n            tmp3 = z1 + z2 * FIX_0_765366865;\r\n\r\n            z1 = workSpace[pointer];\r\n            z2 = workSpace[pointer + 4];\r\n\r\n            tmp0 = (z1 + z2) << BITS;\r\n            tmp1 = (z1 - z2) << BITS;\r\n\r\n            tmp10 = tmp0 + tmp3;\r\n            tmp13 = tmp0 - tmp3;\r\n            tmp11 = tmp1 + tmp2;\r\n            tmp12 = tmp1 - tmp2;\r\n\r\n            tmp3 = workSpace[pointer + 1];\r\n            tmp2 = workSpace[pointer + 3];\r\n            tmp1 = workSpace[pointer + 5];\r\n            tmp0 = workSpace[pointer + 7];\r\n\r\n            z1 = (tmp0 + tmp3) * -FIX_0_899976223;\r\n            z2 = (tmp1 + tmp2) * -FIX_2_562915447;\r\n            z3 = tmp0 + tmp2;\r\n            z4 = tmp1 + tmp3;\r\n\r\n            z5 = (z3 + z4) * FIX_1_175875602;\r\n\r\n            z3 = (z3 * -FIX_1_961570560) + z5;\r\n            z4 = (z4 * -FIX_0_390180644) + z5;\r\n\r\n            tmp0 = (tmp0 * FIX_0_298631336) + z1 + z3;\r\n            tmp1 = (tmp1 * FIX_2_053119869) + z2 + z4;\r\n            tmp2 = (tmp2 * FIX_3_072711026) + z2 + z3;\r\n            
tmp3 = (tmp3 * FIX_1_501321110) + z1 + z4;\r\n\r\n            dst[pointer + 0] = (int16_t)((tmp10 + tmp3) >> F3);\r\n            dst[pointer + 1] = (int16_t)((tmp11 + tmp2) >> F3);\r\n            dst[pointer + 2] = (int16_t)((tmp12 + tmp1) >> F3);\r\n            dst[pointer + 3] = (int16_t)((tmp13 + tmp0) >> F3);\r\n            dst[pointer + 4] = (int16_t)((tmp13 - tmp0) >> F3);\r\n            dst[pointer + 5] = (int16_t)((tmp12 - tmp1) >> F3);\r\n            dst[pointer + 6] = (int16_t)((tmp11 - tmp2) >> F3);\r\n            dst[pointer + 7] = (int16_t)((tmp10 - tmp3) >> F3);\r\n        }\r\n    }\r\n\r\n    inline int Saturate5(int x)\r\n    {\r\n        if (x < 0) x = 0;\r\n        x >>= 11;\r\n        return (x > 0x1F) ? 0x1F : x;\r\n    }\r\n\r\n    inline int Saturate6(int x)\r\n    {\r\n        if (x < 0) x = 0;\r\n        x >>= 10;\r\n        return x > 0x3F ? 0x3F : x;\r\n    }\r\n\r\n    void ComposeImageSlice(ImageSlice *imageSlice, int sliceIndex, uint16_t *javaPixelData, int width, int height)\r\n    {\r\n        int pixelDataQuadrantOffsets[] = {0, BLOCK_WIDTH, width * BLOCK_WIDTH, (width * BLOCK_WIDTH) + BLOCK_WIDTH};\r\n        int imageDataOffset = (sliceIndex - 1) * width * 16;\r\n        const int cromaQuadrantOffsets[] = {0, 4, 32, 36};\r\n\r\n        for (int i = 0; i < imageSlice->Count; i++) {\r\n            MacroBlock *macroBlock = &(imageSlice->MacroBlocks[i]);\r\n\r\n            for (int verticalStep = 0; verticalStep < BLOCK_WIDTH / 2; verticalStep++) {\r\n                int chromaOffset = verticalStep * BLOCK_WIDTH;\r\n                int lumaElementIndex1 = verticalStep * BLOCK_WIDTH * 2;\r\n                int lumaElementIndex2 = lumaElementIndex1 + BLOCK_WIDTH;\r\n                int dataIndex1 = imageDataOffset + (2 * verticalStep * width);\r\n                int dataIndex2 = dataIndex1 + width;\r\n\r\n                for (int horizontalStep = 0; horizontalStep < BLOCK_WIDTH / 2; horizontalStep++) {\r\n                    for (int quadrant = 0; quadrant < 4; quadrant++) {\r\n                        int chromaIndex = chromaOffset + cromaQuadrantOffsets[quadrant] + horizontalStep;\r\n                        int chromaBlueValue = macroBlock->DataBlocks[4][chromaIndex];\r\n                        int chromaRedValue = macroBlock->DataBlocks[5][chromaIndex];\r\n\r\n                        int u = chromaBlueValue - 128;\r\n                        int ug = 88 * u;\r\n                        int ub = 454 * u;\r\n\r\n                        int v = chromaRedValue - 128;\r\n                        int vg = 183 * v;\r\n                        int vr = 359 * v;\r\n\r\n                        for (int pixel = 0; pixel < 2; pixel++) {\r\n                            int r, g, b;\r\n                            int deltaIndex = 2 * horizontalStep + pixel;\r\n                            int lumaElementValue1 = macroBlock->DataBlocks[quadrant][lumaElementIndex1 + deltaIndex] << 8;\r\n                            int lumaElementValue2 = macroBlock->DataBlocks[quadrant][lumaElementIndex2 + deltaIndex] << 8;\r\n                            r = Saturate5(lumaElementValue1 + vr);\r\n                            g = Saturate6(lumaElementValue1 - ug - vg);\r\n                            b = Saturate5(lumaElementValue1 + ub);\r\n                            javaPixelData[dataIndex1 + pixelDataQuadrantOffsets[quadrant] + deltaIndex] = (uint16_t)((r << 11) | (g << 5) | b);\r\n                            r = Saturate5(lumaElementValue2 + vr);\r\n                            g = 
Saturate6(lumaElementValue2 - ug - vg);\r\n                            b = Saturate5(lumaElementValue2 + ub);\r\n                            javaPixelData[dataIndex2 + pixelDataQuadrantOffsets[quadrant] + deltaIndex] = (uint16_t)((r << 11) | (g << 5) | b);\r\n                        }\r\n                    }\r\n                }\r\n            }\r\n            imageDataOffset += 16;\r\n        }\r\n    }\r\n\r\n    void DecodeVideo(uint8_t *stream, int stream_size, uint8_t *img, int *width, int *height)\r\n    {\r\n        int gob = 0;\r\n        int pictureFormat;\r\n        int resolution;\r\n        int pictureType;\r\n        int quantizerMode;\r\n        int sliceCount;\r\n        int blockCount;\r\n        int frameIndex;\r\n        int streamField = 0;\r\n        int streamFieldBitIndex = 32;\r\n        int streamIndex = 0;\r\n        int sliceIndex = 0;\r\n        bool pictureComplete = false;\r\n        ImageSlice *imageSlice = NULL;\r\n        uint16_t *javaPixelData = NULL;\r\n        const int dataBlockBufferLength = 64;\r\n        int16_t dataBlockBuffer[dataBlockBufferLength];\r\n        bool blockY0HasAcComponents = false;\r\n        bool blockY1HasAcComponents = false;\r\n        bool blockY2HasAcComponents = false;\r\n        bool blockY3HasAcComponents = false;\r\n        bool blockCbHasAcComponents = false;\r\n        bool blockCrHasAcComponents = false;\r\n\r\n        while (!pictureComplete && streamIndex < (stream_size >> 2)) {\r\n            // \r\n            AlignStreamData(&streamField, &streamFieldBitIndex);\r\n\r\n            // Picture start code\r\n            int code = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 22);\r\n            int startCode = code & (~0x1F);\r\n\r\n            if (startCode == 32) {\r\n                if ((code & 0x1F) == 0x1F) {\r\n                    pictureComplete = true;\r\n                }\r\n                else {\r\n                    if (sliceIndex++ == 0) {\r\n                        pictureFormat = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 2);\r\n                        resolution    = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 3);\r\n                        pictureType   = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 3);\r\n                        quantizerMode = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 5);\r\n                        frameIndex    = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 32);\r\n\r\n                        switch (pictureFormat) {\r\n                        case CIF:\r\n                            *width = CIF_WIDTH << (resolution - 1);\r\n                            *height = CIG_HEIGHT << (resolution - 1);\r\n                            break;\r\n                        case QVGA:\r\n                            *width = VGA_WIDTH << (resolution - 1);\r\n                            *height = VGA_HEIGHT << (resolution - 1);\r\n                            break;\r\n                        }\r\n\r\n                        // We assume two bytes per pixel (RGB 565)\r\n                        sliceCount = (*height) >> 4;\r\n                        blockCount = (*width) >> 4;\r\n\r\n                        if (imageSlice == NULL) imageSlice = new ImageSlice(blockCount);\r\n                        if (javaPixelData == NULL) javaPixelData = new 
uint16_t[(*width) * (*height)];\r\n                    }\r\n                    else quantizerMode = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 5);\r\n                }\r\n            }\r\n\r\n            // \r\n            if (!pictureComplete) {\r\n                for (int count = 0; count < blockCount; count++) {\r\n                    int macroBlockEmpty = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 1);\r\n                    if (macroBlockEmpty == 0) {\r\n                        int acCoefficientsTemp = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 8);\r\n                        blockY0HasAcComponents = (acCoefficientsTemp >> 0 & 1) == 1;\r\n                        blockY1HasAcComponents = (acCoefficientsTemp >> 1 & 1) == 1;\r\n                        blockY2HasAcComponents = (acCoefficientsTemp >> 2 & 1) == 1;\r\n                        blockY3HasAcComponents = (acCoefficientsTemp >> 3 & 1) == 1;\r\n                        blockCbHasAcComponents = (acCoefficientsTemp >> 4 & 1) == 1;\r\n                        blockCrHasAcComponents = (acCoefficientsTemp >> 5 & 1) == 1;\r\n\r\n                        if ((acCoefficientsTemp >> 6 & 1) == 1) {\r\n                            int quantizer_modeTemp = ReadStreamData(stream, stream_size, &streamIndex, &streamField, &streamFieldBitIndex, 2);\r\n                            quantizerMode = (int) ((quantizer_modeTemp < 2) ? ~quantizer_modeTemp : quantizer_modeTemp);\r\n                        }\r\n\r\n                        GetBlockBytes(stream, stream_size, dataBlockBuffer, dataBlockBufferLength, &streamIndex, &streamField, &streamFieldBitIndex, quantizerMode, blockY0HasAcComponents);\r\n                        InverseTransform(dataBlockBuffer, imageSlice->MacroBlocks[count].DataBlocks[0]);\r\n                        GetBlockBytes(stream, stream_size, dataBlockBuffer, dataBlockBufferLength, &streamIndex, &streamField, &streamFieldBitIndex, quantizerMode, blockY1HasAcComponents);\r\n                        InverseTransform(dataBlockBuffer, imageSlice->MacroBlocks[count].DataBlocks[1]);\r\n                        GetBlockBytes(stream, stream_size, dataBlockBuffer, dataBlockBufferLength, &streamIndex, &streamField, &streamFieldBitIndex, quantizerMode, blockY2HasAcComponents);\r\n                        InverseTransform(dataBlockBuffer, imageSlice->MacroBlocks[count].DataBlocks[2]);\r\n                        GetBlockBytes(stream, stream_size, dataBlockBuffer, dataBlockBufferLength, &streamIndex, &streamField, &streamFieldBitIndex, quantizerMode, blockY3HasAcComponents);\r\n                        InverseTransform(dataBlockBuffer, imageSlice->MacroBlocks[count].DataBlocks[3]);\r\n                        GetBlockBytes(stream, stream_size, dataBlockBuffer, dataBlockBufferLength, &streamIndex, &streamField, &streamFieldBitIndex, quantizerMode, blockCbHasAcComponents);\r\n                        InverseTransform(dataBlockBuffer, imageSlice->MacroBlocks[count].DataBlocks[4]);\r\n                        GetBlockBytes(stream, stream_size, dataBlockBuffer, dataBlockBufferLength, &streamIndex, &streamField, &streamFieldBitIndex, quantizerMode, blockCrHasAcComponents);\r\n                        InverseTransform(dataBlockBuffer, imageSlice->MacroBlocks[count].DataBlocks[5]);\r\n                    }\r\n                }\r\n\r\n                // Compose image slice\r\n                ComposeImageSlice(imageSlice, sliceIndex, 
javaPixelData, *width, *height);\r\n            }\r\n        }\r\n\r\n        // Convert 16-bit RGB565 pixels to 8-bit-per-channel BGR.\r\n        // If no picture start code was found, javaPixelData was never\r\n        // allocated, so skip the conversion instead of dereferencing NULL.\r\n        if (javaPixelData != NULL) {\r\n            for (int i = 0; i < (*width) * (*height); i++) {\r\n                uint8_t r = (javaPixelData[i] & 0xF800) >> 11;\r\n                uint8_t g = (javaPixelData[i] & 0x7E0) >> 5;\r\n                uint8_t b = (javaPixelData[i] & 0x1F);\r\n                *(img + i*3+0) = b << 3;\r\n                *(img + i*3+1) = g << 2;\r\n                *(img + i*3+2) = r << 3;\r\n            }\r\n        }\r\n\r\n        // Release memory\r\n        if (imageSlice) delete imageSlice;\r\n        if (javaPixelData) delete [] javaPixelData;\r\n    }\r\n};\r\n\r\n#endif\r\n"
  },
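The tail of DecodeVideo() above widens the decoder's RGB565 pixels to 8 bits per channel with plain left shifts, so channel maxima come out as 248/252/248 rather than 255 (bit replication such as (r << 3) | (r >> 2) would reach full range, but this decoder does not do that). A stand-alone check of the same arithmetic:

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors the RGB565 -> BGR expansion at the end of UVLC::DecodeVideo():
// 5/6/5-bit fields are widened by left shifts only, so pure white 0xFFFF
// becomes (248, 252, 248) instead of (255, 255, 255).
int main(void)
{
    uint16_t pixel = 0xFFFF;                 // white in RGB565
    uint8_t r = (pixel & 0xF800) >> 11;      // 5 bits
    uint8_t g = (pixel & 0x07E0) >> 5;       // 6 bits
    uint8_t b = (pixel & 0x001F);            // 5 bits
    printf("B=%d G=%d R=%d\n", b << 3, g << 2, r << 3);  // B=248 G=252 R=248
    return 0;
}
```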
  {
    "path": "src/ardrone/version.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   version.cpp\n//! @brief  Version check using FTP\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n\n// --------------------------------------------------------------------------\n//! @brief   Get the version information via FTP.\n//! @return  Result of initialization\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::getVersionInfo(void)\n{\n    TCPSocket socket1, socket2;\n\n    // Open the IP address and port\n    if (!socket1.open(ip, ARDRONE_FTP_PORT)) {\n        CVDRONE_ERROR(\"TCPSocket::open(port=%d) failed. (%s, %d)\\n\", ARDRONE_FTP_PORT, __FILE__, __LINE__);\n        return 0;\n    }\n\n    // Welcome message\n    const size_t len = 1024;\n    char buf[len] = {'\\0'};\n    socket1.receive(buf, len);\n\n    // Log in as anonymous\n    socket1.sendf(\"USER %s\\r\\n\\0\", \"anonymous\");\n    socket1.receive(buf, len);\n\n    // Set to PASV mode\n    int a, b, c, dataport;\n    socket1.sendf(\"PASV\\r\\n\\0\");\n    socket1.receive(buf, len);\n    sscanf(buf, \"227 PASV ok (%d,%d,%d,%d,%d,%d)\\n\", &c, &c, &c, &c, &a, &b);\n    dataport = (a << 8) + b;\n\n    // Open the IP address and port\n    if (!socket2.open(ip, dataport)) {\n        CVDRONE_ERROR(\"TCPSocket::open(port=%d) failed. (%s, %d)\\n\", dataport, __FILE__, __LINE__);\n        return 0;\n    }\n\n    // Send requests\n    socket1.sendf(\"RETR %s\\r\\n\\0\", \"version.txt\");\n\n    // Receive data\n    socket2.receive(buf, len);\n\n    // Get version information\n    sscanf(buf, \"%d.%d.%d\", &version.major, &version.minor, &version.revision);\n    //printf(\"AR.Drone Ver %d.%d.%d\\n\", major, minor, revision);\n\n    // See you\n    socket1.close();\n    socket2.close();\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get the version and the revision number\n//! @param   major A pointer to the major version variable\n//! @param   minor A pointer to the minor version variable\n//! @param   revision A pointer to the revision number variable\n//! 
@return  Major version of AR.Drone\n// --------------------------------------------------------------------------\nint ARDrone::getVersion(int *major, int *minor, int *revision)\n{\n    if (major) *major = version.major;\n    if (minor) *minor = version.minor;\n    if (revision) *revision = version.revision;\n    return version.major;\n}"
  },
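The PASV handshake in getVersionInfo() above hides a small byte-packing step: the last two numbers of the "227" reply are the high and low bytes of the data port, combined as (a << 8) + b, while the first four (the IP address) are discarded into the same scratch variable. A self-contained check with an illustrative reply string (not captured from a real drone):

```cpp
#include <cstdio>

// The PASV arithmetic used by ARDrone::getVersionInfo(): &c soaks up the
// four address fields, then a and b form the 16-bit data port.
int main(void)
{
    const char *reply = "227 PASV ok (192,168,1,1,217,68)\n";
    int a, b, c;
    sscanf(reply, "227 PASV ok (%d,%d,%d,%d,%d,%d)\n", &c, &c, &c, &c, &a, &b);
    printf("data port = %d\n", (a << 8) + b);  // 217*256 + 68 = 55620
    return 0;
}
```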
  {
    "path": "src/ardrone/video.cpp",
    "content": "// -------------------------------------------------------------------------\n// CV Drone (= OpenCV + AR.Drone)\n// Copyright(C) 2016 puku0x\n// https://github.com/puku0x/cvdrone\n//\n// This source file is part of CV Drone library.\n//\n// This library is free software; you can redistribute it and/or\n// modify it under the terms of EITHER:\n// (1) The GNU Lesser General Public License as published by the Free\n//     Software Foundation; either version 2.1 of the License, or (at\n//     your option) any later version. The text of the GNU Lesser\n//     General Public License is included with this library in the\n//     file cvdrone-license-LGPL.txt.\n// (2) The BSD-style license that is included with this library in\n//     the file cvdrone-license-BSD.txt.\n// \n// This library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files\n// cvdrone-license-LGPL.txt and cvdrone-license-BSD.txt for more details.\n//\n//! @file   video.cpp\n//! @brief  Converting video into IplImage or cv::Mat\n//\n// -------------------------------------------------------------------------\n\n#include \"ardrone.h\"\n#include \"uvlc.h\"\n\n// The code decoding H.264 video is based on the following sites.\n// - An ffmpeg and SDL Tutorial - Tutorial 01: Making Screencaps -\n//   http://dranger.com/ffmpeg/tutorial01.html\n// - AR.Drone Development - 2.1.2 AR.Drone 2.0 Video Decording: FFMPEG + SDL2.0 -\n//   http://ardrone-ailab-u-tokyo.blogspot.jp/2012/07/212-ardrone-20-video-decording-ffmpeg.html\n\n// --------------------------------------------------------------------------\n//! @brief   Initialize video.\n//! @return  Result of initialization\n//! @retval  1 Success\n//! @retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::initVideo(void)\n{\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        // Open the IP address and port\n        char filename[256];\n        sprintf(filename, \"tcp://%s:%d\", ip, ARDRONE_VIDEO_PORT);\n        if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) < 0) {\n            CVDRONE_ERROR(\"avformat_open_input() was failed. (%s, %d)\\n\", __FILE__, __LINE__);\n            return 0;\n        }\n\n        // Retrive and dump stream information\n        avformat_find_stream_info(pFormatCtx, NULL);\n        av_dump_format(pFormatCtx, 0, filename, 0);\n\n        // Find the decoder for the video stream\n        pCodecCtx = pFormatCtx->streams[0]->codec;\n        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);\n        if (pCodec == NULL) {\n            CVDRONE_ERROR(\"avcodec_find_decoder() was failed. (%s, %d)\\n\", __FILE__, __LINE__);\n            return 0;\n        }\n\n        // Open codec\n        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {\n            CVDRONE_ERROR(\"avcodec_open2() was failed. 
(%s, %d)\\n\", __FILE__, __LINE__);\n            return 0;\n        }\n\n        // Allocate video frames and a buffer\n        #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)\n        pFrame = av_frame_alloc();\n        pFrameBGR = av_frame_alloc();\n        #else\n        pFrame = avcodec_alloc_frame();\n        pFrameBGR = avcodec_alloc_frame();\n        #endif\n        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));\n\n        // Assign appropriate parts of buffer to image planes in pFrameBGR\n        avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);\n\n        // Convert it to BGR\n        pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);\n    }\n    // AR.Drone 1.0\n    else {\n        // Open the IP address and port\n        if (!sockVideo.open(ip, ARDRONE_VIDEO_PORT)) {\n            CVDRONE_ERROR(\"UDPSocket::open(port=%d) was failed. (%s, %d)\\n\", ARDRONE_VIDEO_PORT, __FILE__, __LINE__);\n            return 0;\n        }\n\n        // Set codec\n        pCodecCtx = avcodec_alloc_context3(NULL);\n        pCodecCtx->width = 320;\n        pCodecCtx->height = 240;\n\n        // Allocate a buffer\n        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));\n    }\n\n    // Allocate an IplImage\n    img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);\n    if (!img) {\n        CVDRONE_ERROR(\"cvCreateImage() was failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    // Clear the image\n    cvZero(img);\n\n    // Create a mutex\n    mutexVideo = new pthread_mutex_t;\n    pthread_mutex_init(mutexVideo, NULL);\n\n    // Create a thread\n    threadVideo = new pthread_t;\n    if (pthread_create(threadVideo, NULL, runVideo, this) != 0) {\n        CVDRONE_ERROR(\"pthread_create() was failed. (%s, %d)\\n\", __FILE__, __LINE__);\n        return 0;\n    }\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Thread function for video.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::loopVideo(void)\n{\n    while (1) {\n        // Get video stream\n        if (!getVideo()) break;\n        pthread_testcancel();\n        msleep(1);\n    }\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get AR.Drone's video stream.\n//! @return  Result of this function\n//! @retval  1 Success\n//! 
@retval  0 Failure\n// --------------------------------------------------------------------------\nint ARDrone::getVideo(void)\n{\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        AVPacket packet;\n        int frameFinished = 0;\n\n        // Read all frames\n        while (av_read_frame(pFormatCtx, &packet) >= 0) {\n            // Decode the frame\n            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);\n\n            // A complete frame was decoded\n            if (frameFinished) {\n                // Convert to BGR\n                if (mutexVideo) pthread_mutex_lock(mutexVideo);\n                sws_scale(pConvertCtx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameBGR->data, pFrameBGR->linesize);\n                newImage = true;\n                if (mutexVideo) pthread_mutex_unlock(mutexVideo);\n\n                // Free the packet and return immediately\n                av_free_packet(&packet);\n                return 1;\n                //break;\n            }\n\n            // Free the packet\n            av_free_packet(&packet);\n        }\n        return 0;\n    }\n    // AR.Drone 1.0\n    else {\n        // Send request\n        sockVideo.sendf(\"\\x01\\x00\\x00\\x00\");\n\n        // Receive data\n        uint8_t buf[122880];\n        int size = sockVideo.receive((void*)&buf, sizeof(buf));\n\n        // Received something\n        if (size > 0) {\n            // Decode UVLC video\n            if (mutexVideo) pthread_mutex_lock(mutexVideo);\n            UVLC::DecodeVideo(buf, size, bufferBGR, &pCodecCtx->width, &pCodecCtx->height);\n            if (mutexVideo) pthread_mutex_unlock(mutexVideo);\n        }\n    }\n\n    return 1;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Get an image from the AR.Drone's camera.\n//! @return  An OpenCV image (IplImage or cv::Mat)\n//! @retval  NULL Failure\n// --------------------------------------------------------------------------\nARDRONE_IMAGE ARDrone::getImage(void)\n{\n    // There is no image\n    if (!img) return ARDRONE_IMAGE(NULL);\n\n    // Enable mutex lock\n    if (mutexVideo) pthread_mutex_lock(mutexVideo);\n\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        // Copy current frame to an IplImage\n        memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);\n    }\n    // AR.Drone 1.0\n    else {\n        // If the sizes of the buffer and the IplImage are different\n        if (pCodecCtx->width != img->width || pCodecCtx->height != img->height) {\n            // Resize the image to 320x240\n            IplImage *small_img = cvCreateImageHeader(cvSize(pCodecCtx->width, pCodecCtx->height), IPL_DEPTH_8U, 3);\n            small_img->imageData = (char*)bufferBGR;\n            cvResize(small_img, img, CV_INTER_CUBIC);\n            cvReleaseImageHeader(&small_img);\n        }\n        // For 320x240 image, just copy it\n        else memcpy(img->imageData, bufferBGR, pCodecCtx->width * pCodecCtx->height * sizeof(uint8_t) * 3);\n    }\n    \n    // The latest image has been read, so change newImage accordingly\n    newImage = false;\n\n    // Disable mutex lock\n    if (mutexVideo) pthread_mutex_unlock(mutexVideo);\n\n    return ARDRONE_IMAGE(img);\n}\n\n// --------------------------------------------------------------------------\n//! @brief   A variation of getImage() like cv::VideoCapture.\n//! 
@return  An OpenCV image (cv::Mat)\n// --------------------------------------------------------------------------\nARDrone& ARDrone::operator >> (cv::Mat &image)\n{\n    image = getImage();\n    return *this;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Check whether we have received a new image since the last getImage().\n//! @return  A bool that is true if we have received a new image and false if we have not\n// --------------------------------------------------------------------------\nbool ARDrone::willGetNewImage(void)\n{\n    // Enable mutex lock\n    if (mutexVideo) pthread_mutex_lock(mutexVideo);\n    \n    bool answer = newImage;\n    \n    // Disable mutex lock\n    if (mutexVideo) pthread_mutex_unlock(mutexVideo);\n    \n    return answer;\n}\n\n// --------------------------------------------------------------------------\n//! @brief   Finalize video.\n//! @return  None\n// --------------------------------------------------------------------------\nvoid ARDrone::finalizeVideo(void)\n{\n    // Destroy the thread\n    if (threadVideo) {\n        pthread_cancel(*threadVideo);\n        pthread_join(*threadVideo, NULL);\n        delete threadVideo;\n        threadVideo = NULL;\n    }\n\n    // Delete the mutex\n    if (mutexVideo) {\n        pthread_mutex_destroy(mutexVideo);\n        delete mutexVideo;\n        mutexVideo = NULL;\n    }\n\n    // Release the IplImage\n    if (img) {\n        cvReleaseImage(&img);\n        img = NULL;\n    }\n\n    // AR.Drone 2.0\n    if (version.major == ARDRONE_VERSION_2) {\n        // Deallocate the frame\n        if (pFrame) {\n            #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)\n            av_frame_free(&pFrame);\n            #else\n            avcodec_free_frame(&pFrame);\n            #endif\n            pFrame = NULL;\n        }\n\n        // Deallocate the BGR frame\n        if (pFrameBGR) {\n            #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)\n            av_frame_free(&pFrameBGR);\n            #else\n            avcodec_free_frame(&pFrameBGR);\n            #endif\n            pFrameBGR = NULL;\n        }\n\n        // Deallocate the buffer\n        if (bufferBGR) {\n            av_free(bufferBGR);\n            bufferBGR = NULL;\n        }\n\n        // Deallocate the convert context\n        if (pConvertCtx) {\n            sws_freeContext(pConvertCtx);\n            pConvertCtx = NULL;\n        }\n\n        // Deallocate the codec\n        if (pCodecCtx) {\n            avcodec_close(pCodecCtx);\n            pCodecCtx = NULL;\n        }\n\n        // Deallocate the format context\n        if (pFormatCtx) {\n            avformat_close_input(&pFormatCtx);\n            pFormatCtx = NULL;\n        }\n    }\n    // AR.Drone 1.0\n    else {\n        // Deallocate the buffer\n        if (bufferBGR) {\n            av_free(bufferBGR);\n            bufferBGR = NULL;\n        }\n\n        // Deallocate the codec\n        if (pCodecCtx) {\n            avcodec_close(pCodecCtx);\n            pCodecCtx = NULL;\n        }\n\n        // Close the socket\n        sockVideo.close();\n    }\n}\n"
  },
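For completeness, a minimal capture loop built only on the members defined above: the cv::VideoCapture-style operator>> and willGetNewImage(), which reads the newImage flag under the video mutex so frames are redrawn only when the decoder thread has produced something new. This is a sketch, not a replacement for the fuller sample in src/main.cpp below.

```cpp
#include "ardrone/ardrone.h"

// Sketch only: poll willGetNewImage() and pull frames via operator>>,
// which forwards to getImage(). Error handling mirrors main.cpp.
int main(void)
{
    ARDrone ardrone;
    if (!ardrone.open()) return -1;

    cv::Mat frame;
    while (cv::waitKey(33) != 0x1b) {           // Esc to quit
        if (!ardrone.willGetNewImage()) continue;
        ardrone >> frame;                       // copy out the latest frame
        cv::imshow("camera", frame);
    }

    ardrone.close();
    return 0;
}
```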
  {
    "path": "src/main.cpp",
    "content": "#include \"ardrone/ardrone.h\"\r\n\r\n// --------------------------------------------------------------------------\r\n// main(Number of arguments, Argument values)\r\n// Description  : This is the entry point of the program.\r\n// Return value : SUCCESS:0  ERROR:-1\r\n// --------------------------------------------------------------------------\r\nint main(int argc, char *argv[])\r\n{\r\n    // AR.Drone class\r\n    ARDrone ardrone;\r\n\r\n    // Initialize\r\n    if (!ardrone.open()) {\r\n        std::cout << \"Failed to initialize.\" << std::endl;\r\n        return -1;\r\n    }\r\n\r\n    // Battery\r\n    std::cout << \"Battery = \" << ardrone.getBatteryPercentage() << \"[%]\" << std::endl;\r\n\r\n    // Instructions\r\n    std::cout << \"***************************************\" << std::endl;\r\n    std::cout << \"*       CV Drone sample program       *\" << std::endl;\r\n    std::cout << \"*           - How to play -           *\" << std::endl;\r\n    std::cout << \"***************************************\" << std::endl;\r\n    std::cout << \"*                                     *\" << std::endl;\r\n    std::cout << \"* - Controls -                        *\" << std::endl;\r\n    std::cout << \"*    'Space' -- Takeoff/Landing       *\" << std::endl;\r\n    std::cout << \"*    'Up'    -- Move forward          *\" << std::endl;\r\n    std::cout << \"*    'Down'  -- Move backward         *\" << std::endl;\r\n    std::cout << \"*    'Left'  -- Turn left             *\" << std::endl;\r\n    std::cout << \"*    'Right' -- Turn right            *\" << std::endl;\r\n    std::cout << \"*    'Q'     -- Move upward           *\" << std::endl;\r\n    std::cout << \"*    'A'     -- Move downward         *\" << std::endl;\r\n    std::cout << \"*                                     *\" << std::endl;\r\n    std::cout << \"* - Others -                          *\" << std::endl;\r\n    std::cout << \"*    'C'     -- Change camera         *\" << std::endl;\r\n    std::cout << \"*    'Esc'   -- Exit                  *\" << std::endl;\r\n    std::cout << \"*                                     *\" << std::endl;\r\n    std::cout << \"***************************************\" << std::endl;\r\n\r\n    while (1) {\r\n        // Key input\r\n        int key = cv::waitKey(33);\r\n        if (key == 0x1b) break;\r\n\r\n        // Get an image\r\n        cv::Mat image = ardrone.getImage();\r\n\r\n        // Take off / Landing \r\n        if (key == ' ') {\r\n            if (ardrone.onGround()) ardrone.takeoff();\r\n            else                    ardrone.landing();\r\n        }\r\n\r\n        // Move\r\n        double vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;\r\n        if (key == 'i' || key == CV_VK_UP)    vx =  1.0;\r\n        if (key == 'k' || key == CV_VK_DOWN)  vx = -1.0;\r\n        if (key == 'u' || key == CV_VK_LEFT)  vr =  1.0;\r\n        if (key == 'o' || key == CV_VK_RIGHT) vr = -1.0;\r\n        if (key == 'j') vy =  1.0;\r\n        if (key == 'l') vy = -1.0;\r\n        if (key == 'q') vz =  1.0;\r\n        if (key == 'a') vz = -1.0;\r\n        ardrone.move3D(vx, vy, vz, vr);\r\n\r\n        // Change camera\r\n        static int mode = 0;\r\n        if (key == 'c') ardrone.setCamera(++mode % 4);\r\n\r\n        // Display the image\r\n        cv::imshow(\"camera\", image);\r\n    }\r\n\r\n    // See you\r\n    ardrone.close();\r\n\r\n    return 0;\r\n}"
  },
  {
    "path": "src/resource/resource.rc",
    "content": "/////////////////////////////////////////////////////////////////////////////\r\n//\r\n// Icon\r\n//\r\nIDI_ICON          ICON          \"icon.ico\"\r\n\r\n/////////////////////////////////////////////////////////////////////////////\r\n//\r\n// Version\r\n//\r\n#define VER_COMMENT             \"AR.Drone with OpenCV Test\"\r\n#define VER_COMPANYNAME         \"https://github.com/puku0x/cvdrone\"\r\n#define VER_FILEDESCRIPTION     \"AR.Drone with OpenCV\"\r\n#define VER_FILEVERSION         \"1.0\"\r\n#define VER_INTERNAL_NAME       \"Test\"\r\n#define VER_LEGALCOPYRIGHT      \"Copyright(C) 2013 puku0x\"\r\n#define VER_ORIGINAL_FILENAME   \"test.exe\"\r\n#define VER_PRODUCTNAME         \"test\"\r\n#define VER_PRODUCTVERSION      \"1.0\"\r\n\r\n1 VERSIONINFO\r\n FILEVERSION 1,0,0,0\r\n PRODUCTVERSION 1,0,0,0\r\n FILEFLAGSMASK 0x0L\r\n#ifdef _DEBUG\r\n FILEFLAGS 0x1L\r\n#else\r\n FILEFLAGS 0x0L\r\n#endif\r\n FILEOS 0x0L\r\n FILETYPE 0x0L\r\n FILESUBTYPE 0x0L\r\nBEGIN\r\n    BLOCK \"StringFileInfo\"\r\n    BEGIN\r\n        BLOCK \"041104B0\"\r\n        BEGIN\r\n            VALUE \"Comments\",         VER_COMMENT\r\n            VALUE \"CompanyName\",      VER_COMPANYNAME\r\n            VALUE \"FileDescription\",  VER_FILEDESCRIPTION\r\n            VALUE \"FileVersion\",      VER_FILEVERSION\r\n            VALUE \"InternalName\",     VER_INTERNAL_NAME\r\n            VALUE \"LegalCopyright\",   VER_LEGALCOPYRIGHT\r\n            VALUE \"OriginalFilename\", VER_ORIGINAL_FILENAME\r\n            VALUE \"ProductName\",      VER_PRODUCTNAME\r\n            VALUE \"ProductVersion\",   VER_PRODUCTVERSION\r\n        END\r\n    END\r\n    BLOCK \"VarFileInfo\"\r\n    BEGIN\r\n        VALUE \"Translation\", 0x411, 1200\r\n    END\r\nEND"
  },
  {
    "path": "src/resource/test.exe.manifest",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\r\n<assembly xmlns=\"urn:schemas-microsoft-com:asm.v1\" manifestVersion=\"1.0\">\r\n<assemblyIdentity\r\n  version=\"1.0.0.0\"\r\n  processorArchitecture=\"X86\"\r\n  name=\"Player.exe\"\r\n/>\r\n<description>Player Ver.15</description>\r\n<dependency>\r\n  <dependentAssembly>\r\n    <assemblyIdentity\r\n      type=\"win32\"\r\n      name=\"Microsoft.Windows.Common-Controls\"\r\n      version=\"6.0.0.0\"\r\n      processorArchitecture=\"X86\"\r\n      publicKeyToken=\"6595b64144ccf1df\"\r\n      language=\"*\"\r\n    />\r\n  </dependentAssembly>\r\n</dependency>\r\n</assembly>"
  }
]